diff --git "a/log_node23.txt" "b/log_node23.txt" new file mode 100644--- /dev/null +++ "b/log_node23.txt" @@ -0,0 +1,33885 @@ ++ echo Logging output to /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//log_node23.txt +Logging output to /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//log_node23.txt ++ export ASCEND_PROCESS_LOG_PATH=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/23 ++ ASCEND_PROCESS_LOG_PATH=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/23 ++ mkdir -p /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/23 ++ DATA_PATH=/local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml ++ TOKENIZER_PATH=/data_4/models/Qwen/Qwen2.5-14B-Instruct/ ++ CKPT_LOAD_DIR=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/ ++ VIT_CKPT_LOAD_DIR=/ ++ CKPT_SAVE_DIR=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743// ++ rsync -avh /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743/ +sending incremental file list + +sent 71 bytes received 12 bytes 166.00 bytes/sec +total size is 23.84K speedup is 287.17 ++ cd /local_disk/cognitron_vl/ ++ rm -fr datasets ++ mkdir -p datasets ++ ln -s /data/data/ datasets/CV ++ ln -s /data/data/LLM datasets/LLM ++ ln -s /data/data/LMM datasets/LMM ++ source /local_disk/cognitron_vl//scripts/set_env_mg_npu.sh +++ source /usr/local/Ascend/driver/bin/setenv.bash ++++ DEP_INFO_FILE=/etc/ascend_install.info ++++ [[ -f /etc/ascend_install.info ]] ++++ . 
/etc/ascend_install.info ++++ DRV_LIB64_COMMON_LDPATH=/driver/lib64/common ++++ DRV_LIB64_DRV_LDPATH=/driver/lib64/driver ++++ DRV_LIB64_LDPATH=/driver/lib64 ++++ export LD_LIBRARY_PATH=/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ LD_LIBRARY_PATH=/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin ++++ PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin +++ source /usr/local/Ascend/ascend-toolkit/set_env.sh ++++ export LD_LIBRARY_PATH=/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ 
LD_LIBRARY_PATH=/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest ++++ ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest +++++ arch ++++ export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export 
LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export PYTHONPATH=/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: ++++ PYTHONPATH=/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: ++++ export PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin ++++ 
PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin ++++ export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest ++++ ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest ++++ export ASCEND_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp ++++ ASCEND_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp ++++ export TOOLCHAIN_HOME=/usr/local/Ascend/ascend-toolkit/latest/toolkit ++++ TOOLCHAIN_HOME=/usr/local/Ascend/ascend-toolkit/latest/toolkit ++++ export ASCEND_HOME_PATH=/usr/local/Ascend/ascend-toolkit/latest ++++ ASCEND_HOME_PATH=/usr/local/Ascend/ascend-toolkit/latest +++ export HCCL_CONNECT_TIMEOUT=7200 +++ HCCL_CONNECT_TIMEOUT=7200 +++ export HCCL_EXEC_TIMEOUT=7200 +++ HCCL_EXEC_TIMEOUT=7200 +++ export COMBINED_ENABLE=1 +++ COMBINED_ENABLE=1 +++ export MULTI_STREAM_MEMORY_REUSE=1 +++ MULTI_STREAM_MEMORY_REUSE=1 +++ export HCCL_RDMA_TC=160 +++ HCCL_RDMA_TC=160 +++ export HCCL_RDMA_SL=5 +++ HCCL_RDMA_SL=5 +++ export HCCL_INTRA_PCIE_ENABLE=0 +++ HCCL_INTRA_PCIE_ENABLE=0 +++ export HCCL_INTRA_ROCE_ENABLE=1 +++ HCCL_INTRA_ROCE_ENABLE=1 +++ export HCCL_RDMA_TIMEOUT=20 +++ HCCL_RDMA_TIMEOUT=20 +++ export INF_NAN_MODE_ENABLE=1 +++ INF_NAN_MODE_ENABLE=1 +++ export DISTRIBUTED_BACKEND=hccl +++ DISTRIBUTED_BACKEND=hccl +++ export ASCEND_LAUNCH_BLOCKING=0 +++ ASCEND_LAUNCH_BLOCKING=0 +++ export ASCEND_SLOG_PRINT_TO_STDOUT=0 +++ ASCEND_SLOG_PRINT_TO_STDOUT=0 +++ export ASCEND_GLOBAL_LOG_LEVEL=3 +++ ASCEND_GLOBAL_LOG_LEVEL=3 +++ export ASCEND_GLOBAL_EVENT_ENABLE=0 +++ ASCEND_GLOBAL_EVENT_ENABLE=0 +++ export TASK_QUEUE_ENABLE=1 +++ TASK_QUEUE_ENABLE=1 +++ export PTCOPY_ENABLE=1 +++ PTCOPY_ENABLE=1 +++ export COMBINED_ENABLE=1 +++ COMBINED_ENABLE=1 +++ export DYNAMIC_OP=ADD#MUL +++ DYNAMIC_OP=ADD#MUL +++ export HCCL_WHITELIST_DISABLE=1 +++ HCCL_WHITELIST_DISABLE=1 +++ export HCCL_CONNECT_TIMEOUT=7200 +++ HCCL_CONNECT_TIMEOUT=7200 +++ export HCCL_WHITELIST_DISABLE=1 +++ HCCL_WHITELIST_DISABLE=1 +++ export CUDA_DEVICE_MAX_CONNECTIONS=1 +++ CUDA_DEVICE_MAX_CONNECTIONS=1 +++ pip3 install --no-index --find-links=/data/software/ -r requirements_npu.txt +Looking in links: /data/software/ +Processing data/software/expecttest-0.2.1-py3-none-any.whl (from -r requirements_npu.txt (line 1)) +Requirement already satisfied: peft in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 2)) (0.7.0) +Processing data/software/XlsxWriter-3.2.0-py3-none-any.whl (from -r requirements_npu.txt (line 3)) +Requirement already satisfied: termcolor in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 4)) (2.4.0) +Requirement already satisfied: tabulate in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 5)) (0.9.0) +Processing data/software/tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from -r requirements_npu.txt (line 6)) +Requirement already satisfied: matplotlib in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 7)) (3.7.5) +Processing 
data/software/datasets-3.0.0-py3-none-any.whl (from -r requirements_npu.txt (line 8)) +Requirement already satisfied: einops in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 9)) (0.7.0) +Processing data/software/pybind11-2.13.6-py3-none-any.whl (from -r requirements_npu.txt (line 10)) +Requirement already satisfied: tensorboardX in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 11)) (2.6.2.2) +Processing data/software/pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from -r requirements_npu.txt (line 12)) +Requirement already satisfied: transformers>=4.40.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 13)) (4.40.1) +Requirement already satisfied: deepspeed>=0.14.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 14)) (0.14.5) +Processing data/software/accelerate-0.34.2-py3-none-any.whl (from -r requirements_npu.txt (line 15)) +Requirement already satisfied: timm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 16)) (0.9.16) +Processing data/software/flask-3.0.3-py3-none-any.whl (from -r requirements_npu.txt (line 17)) +Processing data/software/Flask_RESTful-0.3.10-py2.py3-none-any.whl (from -r requirements_npu.txt (line 18)) +Processing data/software/decord-0.6.0-py3-none-manylinux2010_x86_64.whl (from -r requirements_npu.txt (line 19)) +Processing data/software/natsort-8.4.0-py3-none-any.whl (from -r requirements_npu.txt (line 20)) +Requirement already satisfied: numpy>=1.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (1.24.4) +Requirement already satisfied: packaging>=20.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (23.2) +Requirement already satisfied: psutil in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (5.9.8) +Requirement already satisfied: pyyaml in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (5.4.1) +Requirement already satisfied: torch>=1.13.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (2.1.0+cpu) +Requirement already satisfied: tqdm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (4.66.2) +Requirement already satisfied: safetensors in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (0.4.2) +Requirement already satisfied: huggingface-hub>=0.17.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (0.20.3) +Requirement already satisfied: regex>=2022.1.18 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tiktoken->-r requirements_npu.txt (line 6)) (2023.12.25) +Requirement already satisfied: requests>=2.26.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tiktoken->-r requirements_npu.txt (line 6)) (2.31.0) +Requirement already satisfied: contourpy>=1.0.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (1.1.1) +Requirement already satisfied: cycler>=0.10 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (0.12.1) +Requirement already 
satisfied: fonttools>=4.22.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (4.49.0) +Requirement already satisfied: kiwisolver>=1.0.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (1.4.5) +Requirement already satisfied: pillow>=6.2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (10.2.0) +Requirement already satisfied: pyparsing>=2.3.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (3.1.1) +Requirement already satisfied: python-dateutil>=2.7 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (2.8.2) +Requirement already satisfied: importlib-resources>=3.2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (6.1.2) +Requirement already satisfied: filelock in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.13.1) +Requirement already satisfied: dill<0.3.9,>=0.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (0.3.7) +Requirement already satisfied: pandas in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2.0.3) +Processing data/software/requests-2.32.3-py3-none-any.whl (from tiktoken->-r requirements_npu.txt (line 6)) +Processing data/software/tqdm-4.67.1-py3-none-any.whl (from peft->-r requirements_npu.txt (line 2)) +Requirement already satisfied: xxhash in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.4.1) +Requirement already satisfied: multiprocess in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (0.70.15) +Requirement already satisfied: fsspec<=2024.6.1,>=2023.1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from fsspec[http]<=2024.6.1,>=2023.1.0->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2023.10.0) +Requirement already satisfied: aiohttp in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.9.3) +Processing data/software/huggingface_hub-0.26.2-py3-none-any.whl (from peft->-r requirements_npu.txt (line 2)) +Requirement already satisfied: protobuf>=3.20 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tensorboardX->-r requirements_npu.txt (line 11)) (4.25.3) +Requirement already satisfied: tokenizers<0.20,>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers>=4.40.1->-r requirements_npu.txt (line 13)) (0.19.1) +Requirement already satisfied: hjson in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (3.1.0) +Requirement already satisfied: ninja in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (1.11.1.1) +Requirement already satisfied: nvidia-ml-py in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (12.560.30) +Requirement already satisfied: py-cpuinfo in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt 
(line 14)) (9.0.0) +Requirement already satisfied: pydantic in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (1.10.15) +Processing data/software/safetensors-0.4.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from peft->-r requirements_npu.txt (line 2)) +Requirement already satisfied: torchvision in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from timm->-r requirements_npu.txt (line 16)) (0.16.0) +Requirement already satisfied: Werkzeug>=3.0.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (3.0.1) +Requirement already satisfied: Jinja2>=3.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (3.1.3) +Processing data/software/itsdangerous-2.2.0-py3-none-any.whl (from flask->-r requirements_npu.txt (line 17)) +Requirement already satisfied: click>=8.1.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (8.1.7) +Processing data/software/blinker-1.8.2-py3-none-any.whl (from flask->-r requirements_npu.txt (line 17)) +Requirement already satisfied: importlib-metadata>=3.6.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (7.0.1) +Processing data/software/aniso8601-9.0.1-py2.py3-none-any.whl (from flask_restful->-r requirements_npu.txt (line 18)) +Requirement already satisfied: six>=1.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask_restful->-r requirements_npu.txt (line 18)) (1.16.0) +Requirement already satisfied: pytz in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask_restful->-r requirements_npu.txt (line 18)) (2024.1) +Requirement already satisfied: aiosignal>=1.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.3.1) +Requirement already satisfied: attrs>=17.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (23.2.0) +Requirement already satisfied: frozenlist>=1.1.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.4.1) +Requirement already satisfied: multidict<7.0,>=4.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (6.0.5) +Requirement already satisfied: yarl<2.0,>=1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.9.4) +Requirement already satisfied: async-timeout<5.0,>=4.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (4.0.3) +Requirement already satisfied: typing-extensions>=3.7.4.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from huggingface-hub>=0.17.0->peft->-r requirements_npu.txt (line 2)) (4.10.0) +Requirement already satisfied: zipp>=0.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from importlib-metadata>=3.6.0->flask->-r requirements_npu.txt (line 17)) (3.17.0) +Requirement already satisfied: MarkupSafe>=2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from Jinja2>=3.1.2->flask->-r requirements_npu.txt (line 17)) (2.1.5) +Requirement already satisfied: charset-normalizer<4,>=2 in 
/root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (3.3.2) +Requirement already satisfied: idna<4,>=2.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (3.6) +Requirement already satisfied: urllib3<3,>=1.21.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (1.26.18) +Requirement already satisfied: certifi>=2017.4.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (2024.2.2) +Requirement already satisfied: sympy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (1.4) +Requirement already satisfied: networkx in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (3.1) +Requirement already satisfied: tzdata>=2022.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2024.1) +Requirement already satisfied: mpmath>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from sympy->torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (1.3.0) +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: aniso8601, xlsxwriter, tqdm, safetensors, requests, pybind11, pyarrow, natsort, itsdangerous, expecttest, decord, blinker, tiktoken, huggingface-hub, flask, flask_restful, accelerate, datasets + Attempting uninstall: tqdm + Found existing installation: tqdm 4.66.2 + Uninstalling tqdm-4.66.2: + Successfully uninstalled tqdm-4.66.2 + Attempting uninstall: safetensors + Found existing installation: safetensors 0.4.2 + Uninstalling safetensors-0.4.2: + Successfully uninstalled safetensors-0.4.2 + Attempting uninstall: requests + Found existing installation: requests 2.31.0 + Uninstalling requests-2.31.0: + Successfully uninstalled requests-2.31.0 + Attempting uninstall: pyarrow + Found existing installation: pyarrow 15.0.0 + Uninstalling pyarrow-15.0.0: + Successfully uninstalled pyarrow-15.0.0 + Attempting uninstall: huggingface-hub + Found existing installation: huggingface-hub 0.20.3 + Uninstalling huggingface-hub-0.20.3: + Successfully uninstalled huggingface-hub-0.20.3 + Attempting uninstall: accelerate + Found existing installation: accelerate 0.25.0 + Uninstalling accelerate-0.25.0: + Successfully uninstalled accelerate-0.25.0 + Attempting uninstall: datasets + Found existing installation: datasets 2.16.0 + Uninstalling datasets-2.16.0: + Successfully uninstalled datasets-2.16.0 +ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. +tikit 1.8.2.240926 requires dicttoxml==1.7.4, which is not installed. +tikit 1.8.2.240926 requires docopt==0.6.2, which is not installed. +tikit 1.8.2.240926 requires future==0.18.2, which is not installed. +tikit 1.8.2.240926 requires hdfs==2.6.0, which is not installed. 
+tikit 1.8.2.240926 requires pure-sasl==0.6.2, which is not installed. +tikit 1.8.2.240926 requires py4j==0.10.7, which is not installed. +tikit 1.8.2.240926 requires PyHive[hive]==0.6.4, which is not installed. +tikit 1.8.2.240926 requires pyjwt>=2.4.0, which is not installed. +tikit 1.8.2.240926 requires requests-kerberos>=0.14.0, which is not installed. +tikit 1.8.2.240926 requires sasl==0.3.1, which is not installed. +tikit 1.8.2.240926 requires thrift==0.15.0, which is not installed. +tikit 1.8.2.240926 requires thrift-sasl>=0.1.0, which is not installed. +tikit 1.8.2.240926 requires certifi==2021.10.8, but you have certifi 2024.2.2 which is incompatible. +tikit 1.8.2.240926 requires cos-python-sdk-v5==1.9.29, but you have cos-python-sdk-v5 1.9.26 which is incompatible. +tikit 1.8.2.240926 requires idna==3.3, but you have idna 3.6 which is incompatible. +tikit 1.8.2.240926 requires prettytable==2.5.0, but you have prettytable 3.11.0 which is incompatible. +tikit 1.8.2.240926 requires urllib3==1.26.7, but you have urllib3 1.26.18 which is incompatible. +tikit 1.8.2.240926 requires wcwidth==0.2.5, but you have wcwidth 0.2.13 which is incompatible. +Successfully installed accelerate-0.34.2 aniso8601-9.0.1 blinker-1.8.2 datasets-3.0.0 decord-0.6.0 expecttest-0.2.1 flask-3.0.3 flask_restful-0.3.10 huggingface-hub-0.26.2 itsdangerous-2.2.0 natsort-8.4.0 pyarrow-17.0.0 pybind11-2.13.6 requests-2.32.3 safetensors-0.4.5 tiktoken-0.7.0 tqdm-4.67.1 xlsxwriter-3.2.0 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv +++ return 0 ++ MEGATRON_DIR=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0/ ++ MINDSPEED_DIR=/local_disk/cognitron_vl//third_party/MindSpeed_core_r0.6.0/ ++ MODELLINK_DIR=/local_disk/cognitron_vl//third_party/ModelLink/ ++ pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0/ +Looking in links: /data/software/ +Obtaining file://local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0 + Installing build dependencies: started + Installing build dependencies: finished with status 'done' + Checking if build backend supports build_editable: started + Checking if build backend supports build_editable: finished with status 'done' + Getting requirements to build editable: started + Getting requirements to build editable: finished with status 'done' + Installing backend dependencies: started + Installing backend dependencies: finished with status 'done' + Preparing editable metadata (pyproject.toml): started + Preparing editable metadata (pyproject.toml): finished with status 'done' +Building wheels for collected packages: megatron_core + Building editable for megatron_core (pyproject.toml): started + Building editable for megatron_core (pyproject.toml): finished with status 'done' + Created wheel for megatron_core: filename=megatron_core-0.6.0-0.editable-cp38-cp38-linux_x86_64.whl size=8791 sha256=1e5779b8c5e898345ad6f44325084fa7fe2e6049718a92d556233ebc8fb740ec + Stored in directory: /tmp/pip-ephem-wheel-cache-ujne6zp_/wheels/54/9c/d1/d2015aa0c34e791e64d65d19395e5a9a5528f0c63fd519b9ff +Successfully built megatron_core +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. 
A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: megatron_core +Successfully installed megatron_core-0.6.0 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ++ pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/MindSpeed_core_r0.6.0/ +Looking in links: /data/software/ +Obtaining file://local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0 + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +WARNING: Error parsing requirements for tokenizers: [Errno 2] No such file or directory: '/root/miniconda3/envs/py38/lib/python3.8/site-packages/tokenizers-0.19.1.dist-info/METADATA' +WARNING: Error parsing requirements for transformers: [Errno 2] No such file or directory: '/root/miniconda3/envs/py38/lib/python3.8/site-packages/transformers-4.40.1.dist-info/METADATA' +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: mindspeed + Running setup.py develop for mindspeed +Successfully installed mindspeed-0.6.0 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ++ pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/ModelLink/ +Looking in links: /data/software/ +Obtaining file://local_disk/cognitron_vl/third_party/ModelLink + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +Requirement already satisfied: numpy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.24.4) +Processing data/software/transformers-4.43.2-py3-none-any.whl (from modellink==0.0.1) +Processing data/software/transformers-stream-generator-0.0.5.tar.gz (from modellink==0.0.1) + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +Requirement already satisfied: sympy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.4) +Requirement already satisfied: decorator in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (5.1.1) +Requirement already satisfied: scipy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.10.1) +Requirement already satisfied: sentencepiece in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.2.0) +Requirement already satisfied: einops in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.7.0) +Requirement already satisfied: datasets in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (3.0.0) +Requirement already satisfied: pybind11 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (2.13.6) +Requirement already satisfied: accelerate in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.34.2) +Requirement already satisfied: six in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.16.0) +Requirement already satisfied: protobuf in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (4.25.3) +Processing data/software/peft-0.7.1-py3-none-any.whl (from modellink==0.0.1) +Requirement already satisfied: tiktoken in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.7.0) +Requirement already satisfied: packaging>=20.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (23.2) +Requirement already satisfied: psutil in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (5.9.8) +Requirement already satisfied: pyyaml in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (5.4.1) +Requirement already satisfied: torch>=1.13.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (2.1.0+cpu) +Requirement already satisfied: tqdm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (4.67.1) +Requirement already satisfied: safetensors in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (0.4.5) +Requirement already satisfied: huggingface-hub>=0.17.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (0.26.2) +Requirement already satisfied: filelock in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (3.13.1) +Requirement already satisfied: 
regex!=2019.12.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (2023.12.25) +Requirement already satisfied: requests in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (2.32.3) +Processing data/software/tokenizers-0.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from transformers==4.43.2->modellink==0.0.1) +Requirement already satisfied: pyarrow>=15.0.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (17.0.0) +Requirement already satisfied: dill<0.3.9,>=0.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (0.3.7) +Requirement already satisfied: pandas in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (2.0.3) +Requirement already satisfied: xxhash in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (3.4.1) +Requirement already satisfied: multiprocess in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (0.70.15) +Requirement already satisfied: fsspec<=2024.6.1,>=2023.1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from fsspec[http]<=2024.6.1,>=2023.1.0->datasets->modellink==0.0.1) (2023.10.0) +Requirement already satisfied: aiohttp in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (3.9.3) +Requirement already satisfied: mpmath>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from sympy->modellink==0.0.1) (1.3.0) +Requirement already satisfied: aiosignal>=1.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.3.1) +Requirement already satisfied: attrs>=17.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (23.2.0) +Requirement already satisfied: frozenlist>=1.1.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.4.1) +Requirement already satisfied: multidict<7.0,>=4.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (6.0.5) +Requirement already satisfied: yarl<2.0,>=1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.9.4) +Requirement already satisfied: async-timeout<5.0,>=4.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (4.0.3) +Requirement already satisfied: typing-extensions>=3.7.4.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from huggingface-hub>=0.17.0->peft==0.7.1->modellink==0.0.1) (4.10.0) +Requirement already satisfied: charset-normalizer<4,>=2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (3.3.2) +Requirement already satisfied: idna<4,>=2.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (3.6) +Requirement already satisfied: urllib3<3,>=1.21.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (1.26.18) +Requirement already satisfied: certifi>=2017.4.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (2024.2.2) +Requirement already satisfied: networkx in 
/root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (3.1) +Requirement already satisfied: jinja2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (3.1.3) +Requirement already satisfied: python-dateutil>=2.8.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2.8.2) +Requirement already satisfied: pytz>=2020.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2024.1) +Requirement already satisfied: tzdata>=2022.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2024.1) +Requirement already satisfied: MarkupSafe>=2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from jinja2->torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (2.1.5) +Building wheels for collected packages: transformers_stream_generator + Building wheel for transformers_stream_generator (setup.py): started + Building wheel for transformers_stream_generator (setup.py): finished with status 'done' + Created wheel for transformers_stream_generator: filename=transformers_stream_generator-0.0.5-py3-none-any.whl size=12425 sha256=40b0a084a3d16439663e6c0dd328303d5ed7e558df43c73d4f722bde0914a923 + Stored in directory: /root/.cache/pip/wheels/56/8c/42/5381d9c36bc85f28982f4cf8f98dc44d37a6d6c04897a5cb7c +Successfully built transformers_stream_generator +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: tokenizers, transformers, transformers_stream_generator, peft, modellink + Attempting uninstall: tokenizers + Found existing installation: tokenizers 0.20.3 + Uninstalling tokenizers-0.20.3: + Successfully uninstalled tokenizers-0.20.3 + Attempting uninstall: transformers + Found existing installation: transformers 4.46.3 + Uninstalling transformers-4.46.3: + Successfully uninstalled transformers-4.46.3 + Attempting uninstall: peft + Found existing installation: peft 0.7.0 + Uninstalling peft-0.7.0: + Successfully uninstalled peft-0.7.0 + Running setup.py develop for modellink +Successfully installed modellink-0.0.1 peft-0.7.1 tokenizers-0.19.1 transformers-4.43.2 transformers_stream_generator-0.0.5 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ++ export PYTHONPATH=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0//:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: ++ PYTHONPATH=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0//:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: ++ GPUS_PER_NODE=16 ++ NNODES=32 ++ NODE_RANK=23 ++ MASTER_PORT=34567 ++ export CUDA_DEVICE_MAX_CONNECTIONS=1 ++ CUDA_DEVICE_MAX_CONNECTIONS=1 ++ export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True ++ PYTORCH_NPU_ALLOC_CONF=expandable_segments:True ++ VISION_SEQ_LENGTH=1025 ++ IMAGE_TOKEN_LENGTH=256 ++ IMAGE_SIZE=448 ++ VISION_MODEL_TYPE=intern_300m ++ TP=8 ++ PP=1 ++ CP=8 ++ CP_ALGO=megatron_cp_algo ++ CP_MASK=causal ++ DISTRIBUTED_ARGS=' + --nproc_per_node 16 --nnodes 32 --node_rank 23 --master_addr train-1198772881325351168-93vlj4s2getc-master-0.train-100034032793.svc.cluster.local --master_port 34567 +' ++ GPT_ARGS=' + --use-mcore-models --tensor-model-parallel-size 8 --pipeline-model-parallel-size 1 --context-parallel-size 8 --context-parallel-algo megatron_cp_algo --cp-attention-mask-type causal --use-cp-send-recv-overlap --no-create-attention-mask-in-dataloader --sparse-mode 4 --sequence-parallel --recompute-method block --recompute-granularity full --recompute-num-layers 48 --num-layers 48 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --group-query-attention --num-query-groups 8 --tokenizer-type PretrainedFromHF --tokenizer-name-or-path /data_4/models/Qwen/Qwen2.5-14B-Instruct/ --seq-length 1048576 --max-position-embeddings 1048576 --micro-batch-size 1 --global-batch-size 8 --make-vocab-size-divisible-by 1 --padded-vocab-size 152064 --rotary-base 1000000.0 --lr 5.00e-6 --train-iters 500 --lr-decay-style cosine --untie-embeddings-and-output-weights --disable-bias-linear --attention-dropout 0.0 --init-method-std 0.01 --hidden-dropout 0.0 --position-embedding-type rope --normalization RMSNorm --use-fused-rmsnorm --norm-epsilon 1e-6 --swiglu --use-flash-attn --use-fused-rotary-pos-emb --use-rotary-position-embeddings --use-fused-swiglu --use-mc2 --no-masked-softmax-fusion --attention-softmax-in-fp32 --min-lr 1.00e-7 --weight-decay 0.0 --lr-warmup-fraction 0.03 --clip-grad 1.0 --adam-beta1 0.9 --adam-beta2 0.999 --add-qkv-bias --initial-loss-scale 4096 --no-gradient-accumulation-fusion --use-distributed-optimizer --bf16 --overlap-grad-reduce --finetune --vision-model-freeze --vision-model-type intern_300m --vision-downsample-ratio 0.5 --vision-projector-type mlp --vision-projector-pre-norm --vision-process-type dynamic --vision-normalize-type imagenet --vision-seq-length 1025 --image-token-length 256 --image-size 448 --prompt-format qwen2 --is-instruction-dataset --max-num-image 4096 --max-fps 1 --add-class-token --min-patch-grid 1 --max-patch-grid 12 --logit-mask --cross-dataset-joint ' ++ DATA_ARGS=' + --data-path /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml --split 100,0,0 --data-seq-length 1048576 --num-workers 8 ' ++ CKPT_ARGS=' + --load 
/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/ --vit-load / --no-load-optim --no-load-rng --seed 42424242 --save /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743// ' ++ OUTPUT_ARGS=' + --log-interval 1 --save-interval 20 --eval-interval 20 --eval-iters 0 --log-throughput --distributed-timeout-minutes 120 ' ++ torchrun --nproc_per_node 16 --nnodes 32 --node_rank 23 --master_addr train-1198772881325351168-93vlj4s2getc-master-0.train-100034032793.svc.cluster.local --master_port 34567 /local_disk/cognitron_vl//lcvlm_modellink/pretrain_lcvlm.py --use-mcore-models --tensor-model-parallel-size 8 --pipeline-model-parallel-size 1 --context-parallel-size 8 --context-parallel-algo megatron_cp_algo --cp-attention-mask-type causal --use-cp-send-recv-overlap --no-create-attention-mask-in-dataloader --sparse-mode 4 --sequence-parallel --recompute-method block --recompute-granularity full --recompute-num-layers 48 --num-layers 48 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --group-query-attention --num-query-groups 8 --tokenizer-type PretrainedFromHF --tokenizer-name-or-path /data_4/models/Qwen/Qwen2.5-14B-Instruct/ --seq-length 1048576 --max-position-embeddings 1048576 --micro-batch-size 1 --global-batch-size 8 --make-vocab-size-divisible-by 1 --padded-vocab-size 152064 --rotary-base 1000000.0 --lr 5.00e-6 --train-iters 500 --lr-decay-style cosine --untie-embeddings-and-output-weights --disable-bias-linear --attention-dropout 0.0 --init-method-std 0.01 --hidden-dropout 0.0 --position-embedding-type rope --normalization RMSNorm --use-fused-rmsnorm --norm-epsilon 1e-6 --swiglu --use-flash-attn --use-fused-rotary-pos-emb --use-rotary-position-embeddings --use-fused-swiglu --use-mc2 --no-masked-softmax-fusion --attention-softmax-in-fp32 --min-lr 1.00e-7 --weight-decay 0.0 --lr-warmup-fraction 0.03 --clip-grad 1.0 --adam-beta1 0.9 --adam-beta2 0.999 --add-qkv-bias --initial-loss-scale 4096 --no-gradient-accumulation-fusion --use-distributed-optimizer --bf16 --overlap-grad-reduce --finetune --vision-model-freeze --vision-model-type intern_300m --vision-downsample-ratio 0.5 --vision-projector-type mlp --vision-projector-pre-norm --vision-process-type dynamic --vision-normalize-type imagenet --vision-seq-length 1025 --image-token-length 256 --image-size 448 --prompt-format qwen2 --is-instruction-dataset --max-num-image 4096 --max-fps 1 --add-class-token --min-patch-grid 1 --max-patch-grid 12 --logit-mask --cross-dataset-joint --data-path /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml --split 100,0,0 --data-seq-length 1048576 --num-workers 8 --load /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/ --vit-load / --no-load-optim --no-load-rng --seed 42424242 --save /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743// --log-interval 1 --save-interval 20 --eval-interval 20 --eval-iters 0 --log-throughput --distributed-timeout-minutes 120 --distributed-backend nccl +[2024-11-28 15:50:32,161] torch.distributed.run: [WARNING] +[2024-11-28 15:50:32,161] torch.distributed.run: [WARNING] ***************************************** +[2024-11-28 15:50:32,161] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune 
the variable for optimal performance in your application as needed. +[2024-11-28 15:50:32,161] torch.distributed.run: [WARNING] ***************************************** +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Creating extension directory /root/.cache/torch_extensions/py38_cpu/adaptive_cp... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Emitting ninja build file /root/.cache/torch_extensions/py38_cpu/adaptive_cp/build.ninja... +Building extension module adaptive_cp... +Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N) +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +[1/2] c++ -MMD -MF adaptive_cp.o.d -DTORCH_EXTENSION_NAME=adaptive_cp -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -I/usr/local/Ascend/ascend-toolkit/latest/include -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/include -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/third_party -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/acl -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/inc -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/TH -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/THC -isystem /root/miniconda3/envs/py38/include/python3.8 -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++17 -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack -fPIC -pie -Wl,--disable-new-dtags,--rpath -s -O2 -c local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/ops/csrc/algorithm/adaptive_cp/adaptive_cp.cpp -o adaptive_cp.o +[2/2] c++ adaptive_cp.o -shared -L/usr/local/Ascend/ascend-toolkit/latest/lib64 -lascendcl -L/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/lib -ltorch_npu -L/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/lib -lc10 -ltorch_cpu -ltorch -ltorch_python -o adaptive_cp.so +Loading extension module adaptive_cp... 
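Editor's note: the adaptive_cp build above follows PyTorch's JIT C++-extension machinery (the "PyTorch extensions root", ninja build, and "Loading extension module" messages come from it). A minimal sketch of that flow is given below; the source path is illustrative and this is not necessarily the exact MindSpeed invocation.

# Minimal sketch of the PyTorch JIT C++-extension flow seen above (illustrative
# source path; the exact MindSpeed call may differ).
from torch.utils.cpp_extension import load

adaptive_cp = load(
    name="adaptive_cp",
    sources=["mindspeed/ops/csrc/algorithm/adaptive_cp/adaptive_cp.cpp"],
    extra_cflags=["-O2", "-std=c++17"],
    verbose=True,  # prints the "Emitting ninja build file ..." / "Building extension module ..." lines
)
# Other local ranks that find the cached adaptive_cp.so print
# "Using ... as PyTorch extensions root..." and load it without recompiling.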
+local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp...
+/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?
+ warn(
+> compiling dataset index builder ...
+make: Entering directory 'local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory 'local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.946 seconds
+vision_projector_recompute False
+vision_model_freeze
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+ + +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+vision_model_freeze
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0..23}.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +vision_model_freeze +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+ + +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+vision_model_freeze
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{0-23}.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
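The "vision_model_freeze" step is visible here only through its log lines; its actual implementation is not part of this log. As a rough illustration, a loop of the following shape would produce exactly these "=> set param ..." messages. This is a minimal sketch, assuming the freeze simply walks the named parameters under the external_feature_model.vit submodule; the helper name freeze_submodule and the toy module in the demo are hypothetical, not taken from cognitron_vl or ModelLink.

    import torch.nn as nn

    def freeze_submodule(model, prefix="external_feature_model.vit"):
        """Disable gradients for every parameter under `prefix`, logging each one
        in the same "=> set param ..." format as the lines above."""
        for name, param in model.named_parameters():
            if name.startswith(prefix):
                param.requires_grad = False
                print(f"=> set param {name} {param.shape} requires grad to False.")

    if __name__ == "__main__":
        # Toy stand-in for the real GPTVLModel, used only to show the log format.
        toy = nn.ModuleDict({"external_feature_model": nn.ModuleDict({"vit": nn.Linear(4, 4)})})
        freeze_submodule(toy)  # prints two "=> set param ..." lines (weight and bias)

In the log lines above, only parameters under external_feature_model.vit are reported as frozen; everything else keeps requires_grad=True unless a later step changes it.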
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False.model GPTVLModel( + (external_feature_model): MegatronVisionModel( + (vit): InternViTModel( + (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14)) + (position_embeddings): Embedding(1025, 1024) + (decoder): TransformerBlock( + (layers): ModuleList( + (0-23): 24 x InternViTTransformerLayer( + (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + ) + (self_attn_bda): IdentityFuncOp() + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + (mlp_bda): IdentityFuncOp() + ) + ) + ) + ) + (vision_projection): MultimodalProjector( + (encoder): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True) + ) + (embedding): LanguageModelEmbedding( + (word_embeddings): VocabParallelEmbedding() + (embedding_dropout): Dropout(p=0.0, inplace=False) + ) + (rotary_pos_emb): RotaryEmbedding() + (decoder): TransformerBlock( + (layers): ModuleList( + (0-47): 48 x TransformerLayer( + (input_layernorm): RMSNorm() + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + (q_layernorm): IdentityOp() + (k_layernorm): IdentityOp() + ) + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): RMSNorm() + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + ) + (final_layernorm): RMSNorm() + ) + (output_layer): ColumnParallelLinear() +) +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+ + +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
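These messages are the vision tower being frozen parameter by parameter. A minimal sketch of the loop such messages would come from, assuming a plain torch.nn.Module interface; the actual ModelLink/Cognitron-VL freeze hook may be named and structured differently:

import torch

def freeze_vision_tower(model: torch.nn.Module, prefix: str = "external_feature_model.vit.") -> None:
    # Disable gradients for every parameter under the vision-tower prefix and
    # log it in the same format as the messages in this section.
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = False
            print(f"=> set param {name} {param.shape} requires grad to False.")

# After the loop, the script prints the marker that appears further down in this log:
# print("vision_model_freeze")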
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +vision_model_freeze +model GPTVLModel( + (external_feature_model): MegatronVisionModel( + (vit): InternViTModel( + (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14)) + (position_embeddings): Embedding(1025, 1024) + (decoder): TransformerBlock( + (layers): ModuleList( + (0-23): 24 x InternViTTransformerLayer( + (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + ) + (self_attn_bda): IdentityFuncOp() + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + (mlp_bda): IdentityFuncOp() + ) + ) + ) + ) + (vision_projection): MultimodalProjector( + (encoder): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True) + ) + (embedding): LanguageModelEmbedding( + (word_embeddings): VocabParallelEmbedding() + (embedding_dropout): Dropout(p=0.0, inplace=False) + ) + (rotary_pos_emb): RotaryEmbedding() + (decoder): TransformerBlock( + (layers): ModuleList( + (0-47): 48 x TransformerLayer( + (input_layernorm): RMSNorm() + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + (q_layernorm): IdentityOp() + (k_layernorm): IdentityOp() + ) + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): RMSNorm() + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + ) + (final_layernorm): RMSNorm() + ) + (output_layer): ColumnParallelLinear() +)=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. + +=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. 
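The per-rank weight shapes in the printout above are smaller than the full InternViT layer sizes because the run uses tensor parallelism (TP=8, per the tp8pp1cp8 script name): ColumnParallelLinear layers shard their output dimension and RowParallelLinear layers shard their input dimension across the 8 ranks. A minimal sketch of that arithmetic, assuming standard Megatron-style sharding (the hidden sizes are read off the printed module dims, not from the training code):

    # Hypothetical check of the shard shapes seen in the log, assuming TP=8
    TP = 8
    hidden = 1024            # InternViT hidden size (LayerNorm((1024,)) in the printout)
    ffn_hidden = 4096        # implied by the [512, 1024] linear_fc1 shard times TP
    qkv_out = 3 * hidden     # fused Q/K/V projection

    assert (qkv_out // TP, hidden) == (384, 1024)     # linear_qkv.weight  (ColumnParallelLinear)
    assert (hidden, hidden // TP) == (1024, 128)      # linear_proj.weight (RowParallelLinear)
    assert (ffn_hidden // TP, hidden) == (512, 1024)  # linear_fc1.weight  (ColumnParallelLinear)
    assert (hidden, ffn_hidden // TP) == (1024, 512)  # linear_fc2.weight  (RowParallelLinear)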
+[... concurrent output from the remaining tensor-parallel ranks omitted for readability: each rank prints the same "vision_model_freeze" banner, the same GPTVLModel architecture printout, and the same "=> set param ... requires grad to False." lines for the external_feature_model.vit parameters; because all ranks append to this shared log, their lines arrive interleaved ...]
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+ + +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. 
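The "=> set param ... requires grad to False." messages show the InternViT vision tower being frozen before this fine-tuning stage starts. Below is a minimal sketch of how such a prefix-based freeze is typically implemented in PyTorch; the helper name freeze_by_prefix and the exact name filter are illustrative assumptions, not taken from this codebase:

```python
import torch.nn as nn


def freeze_by_prefix(model: nn.Module, prefix: str = "external_feature_model.vit.") -> None:
    """Freeze every parameter whose name starts with `prefix` and log it.

    Hypothetical helper: the real training code may select parameters
    differently, but the log lines above suggest a loop of this shape.
    """
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = False
            print(f"=> set param {name} {param.shape} requires grad to False.")


# Usage sketch: freeze the ViT encoder while leaving the projector and the
# language model trainable.
# freeze_by_prefix(model, "external_feature_model.vit.")
```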
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
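The ColumnParallelLinear/RowParallelLinear modules in this dump explain the per-rank weight shapes seen in the freeze messages: assuming a tensor-parallel size of 8 and a ViT hidden size of 1024, the qkv projection keeps 3*1024/8 = 384 output rows per rank, the output projection keeps 1024/8 = 128 input columns, and the fc1/fc2 shapes imply an MLP hidden width of 512*8 = 4096. A small sanity-check sketch of this arithmetic (the helper names and the TP=8 value are assumptions for illustration):

```python
# Per-rank (sharded) weight shapes for the frozen ViT, assuming tensor-parallel
# size 8, ViT hidden size 1024, and a 4096-wide MLP hidden dimension.
TP, HIDDEN, FFN_HIDDEN = 8, 1024, 4096


def column_parallel_shape(out_features: int, in_features: int, tp: int = TP) -> tuple:
    # Column-parallel layers split the output dimension across ranks.
    return (out_features // tp, in_features)


def row_parallel_shape(out_features: int, in_features: int, tp: int = TP) -> tuple:
    # Row-parallel layers split the input dimension across ranks.
    return (out_features, in_features // tp)


assert column_parallel_shape(3 * HIDDEN, HIDDEN) == (384, 1024)   # linear_qkv.weight
assert row_parallel_shape(HIDDEN, HIDDEN) == (1024, 128)          # linear_proj.weight
assert column_parallel_shape(FFN_HIDDEN, HIDDEN) == (512, 1024)   # mlp.linear_fc1.weight
assert row_parallel_shape(HIDDEN, FFN_HIDDEN) == (1024, 512)      # mlp.linear_fc2.weight
```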
+[... "=> set param ... requires grad to False." messages continue for the same 14 parameters of external_feature_model.vit.decoder.layers.12 through layers.16, with the GPTVLModel structure above printed a second time in between ...]
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+ + +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+ + + +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. 
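+The messages above record gradients being disabled on the vision tower. A minimal sketch of such a freeze loop (assuming a torch.nn.Module that exposes the vision tower under the external_feature_model.vit prefix; the actual ModelLink/cognitron_vl helper may differ) is:
+
+    import torch
+
+    def freeze_vision_tower(model: torch.nn.Module, prefix: str = "external_feature_model.vit") -> None:
+        # Walk every named parameter, turn off gradients for the vision tower,
+        # and log in the same format as the messages above.
+        for name, param in model.named_parameters():
+            if name.startswith(prefix):
+                param.requires_grad = False
+                print(f"=> set param {name} {param.shape} requires grad to False.")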
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
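+The printed GPTVLModel shows the vision path: InternViT features pass through the 4096-dim pre_proj_layernorm and the two-layer MLP of MultimodalProjector before entering the Qwen2.5 decoder. A rough single-device sketch of that wiring follows; plain nn.Linear stands in for the Column/RowParallelLinear layers, the GELU activation and the 5120 LM hidden size (Qwen2.5-14B) are assumptions, and the 4096-dim input implies ViT tokens are merged before projection, which is also an assumption here:
+
+    import torch
+    import torch.nn as nn
+
+    class VisionProjectionSketch(nn.Module):
+        def __init__(self, merged_vit_dim: int = 4096, lm_hidden: int = 5120):
+            super().__init__()
+            # pre_proj_layernorm: LayerNorm((4096,), eps=1e-05) in the printout above.
+            self.pre_proj_layernorm = nn.LayerNorm(merged_vit_dim, eps=1e-05)
+            # MultimodalProjector.encoder: linear_fc1 / linear_fc2 in the printout.
+            self.linear_fc1 = nn.Linear(merged_vit_dim, lm_hidden)
+            self.linear_fc2 = nn.Linear(lm_hidden, lm_hidden)
+
+        def forward(self, vit_features: torch.Tensor) -> torch.Tensor:
+            # vit_features: [batch, num_tokens, merged_vit_dim]
+            x = self.pre_proj_layernorm(vit_features)
+            return self.linear_fc2(torch.nn.functional.gelu(self.linear_fc1(x)))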
+[... identical freeze messages repeat for external_feature_model.vit.decoder.layers.17-23 ...]
+vision_model_freeze
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +vision_model_freeze +=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. +=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.model GPTVLModel( + (external_feature_model): MegatronVisionModel( + (vit): InternViTModel( + (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14)) + (position_embeddings): Embedding(1025, 1024) + (decoder): TransformerBlock( + (layers): ModuleList( + (0-23): 24 x InternViTTransformerLayer( + (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + ) + (self_attn_bda): IdentityFuncOp() + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True) + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + (mlp_bda): IdentityFuncOp() + ) + ) + ) + ) + (vision_projection): MultimodalProjector( + (encoder): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True) + ) + (embedding): LanguageModelEmbedding( + (word_embeddings): VocabParallelEmbedding() + (embedding_dropout): Dropout(p=0.0, inplace=False) + ) + (rotary_pos_emb): RotaryEmbedding() + (decoder): TransformerBlock( + (layers): ModuleList( + (0-47): 48 x TransformerLayer( + (input_layernorm): RMSNorm() + (self_attention): SelfAttention( + (core_attention): DotProductAttention( + (scale_mask_softmax): FusedScaleMaskSoftmax() + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (linear_proj): RowParallelLinear() + (linear_qkv): ColumnParallelLinear() + (q_layernorm): IdentityOp() + (k_layernorm): IdentityOp() + ) + (pre_cross_attn_layernorm): IdentityOp() + (cross_attention): IdentityOp() + (cross_attn_bda): IdentityFuncOp() + (pre_mlp_layernorm): RMSNorm() + (mlp): MLP( + (linear_fc1): ColumnParallelLinear() + (linear_fc2): RowParallelLinear() + ) + ) + ) + (final_layernorm): RMSNorm() + ) + (output_layer): ColumnParallelLinear() +) + +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to 
False.
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
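The GPTVLModel dump above shows the multimodal path: a 24-layer InternViT encoder with 1024-wide features, a pre_proj_layernorm over 4096 features, an MLP vision projector, and a 48-layer Qwen2.5-style decoder. One plausible reading is that four 1024-dim patch tokens are merged into a single 4096-dim token before being projected into the language model's hidden size; the 2x2 merge, the GELU activation, and the 5120 target width are assumptions made for this sketch, not values printed in the log.

import torch
import torch.nn as nn

# Shape walk-through of the vision -> language path, under the assumptions above.
vit_hidden, merged_hidden, llm_hidden = 1024, 4 * 1024, 5120

pre_proj_layernorm = nn.LayerNorm(merged_hidden)      # matches LayerNorm((4096,), eps=1e-05)
vision_projection = nn.Sequential(                    # stand-in for MultimodalProjector.encoder
    nn.Linear(merged_hidden, llm_hidden),             # linear_fc1 (assumed output width)
    nn.GELU(),                                        # assumed activation
    nn.Linear(llm_hidden, llm_hidden),                # linear_fc2
)

patch_tokens = torch.randn(1, 1024, vit_hidden)       # 1024 patch tokens from the ViT
# Plain reshape shown only for the shape arithmetic; a real 2x2 pixel-shuffle
# would regroup spatially adjacent tokens before concatenating their channels.
merged_tokens = patch_tokens.reshape(1, 256, merged_hidden)
vision_embeds = vision_projection(pre_proj_layernorm(merged_tokens))
print(vision_embeds.shape)                            # torch.Size([1, 256, 5120])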
+=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
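The "requires grad to False" lines above report the InternViT vision tower being frozen before this stage-4 finetune. A minimal sketch of that pattern (illustrative only, not the actual ModelLink/Cognitron-VL code; the prefix used below is an assumption taken from the parameter names in this log):

import torch.nn as nn

def freeze_by_prefix(model: nn.Module, prefixes=("external_feature_model.vit.",)):
    # Walk every parameter; any name under the given prefixes stops receiving gradients.
    for name, param in model.named_parameters():
        if name.startswith(prefixes):  # str.startswith accepts a tuple of prefixes
            param.requires_grad = False
            print(f"=> set param {name} {param.shape} requires grad to False.")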
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
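Right after the model dump, optimizer setup logs one _get_param_groups line per trainable parameter; the four-value key appears to carry per-parameter multipliers and flags, with the leading 0.0 entries lining up with biases and norm weights (i.e. no weight decay). A rough sketch of that kind of grouping (assumed semantics, not Megatron-LM's actual implementation):

from collections import defaultdict
import torch.nn as nn

def get_param_groups(model: nn.Module, lr_mult: float = 1.0):
    buckets = defaultdict(list)
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen ViT weights never reach the optimizer
        # Heuristic: biases and norm/scale parameters get no weight decay.
        wd_mult = 0.0 if (name.endswith(".bias") or "layernorm" in name) else 1.0
        print(f"_get_param_groups name {name} key {(wd_mult, lr_mult, False, False)}")
        buckets[(wd_mult, lr_mult)].append(param)
    return [{"params": p, "wd_mult": wd, "lr_mult": lr} for (wd, lr), p in buckets.items()]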
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0,
1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + + + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight 
key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight 
key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups 
name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name 
module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key 
(0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key 
(0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) 
+_get_param_groups output (deduplicated; the original lines were interleaved across concurrent ranks printing to the same log). Each trainable parameter is reported once with its optimizer-group key; the per-layer entries repeat for every decoder layer index seen in this span (0-43), written below with <N> standing for the layer index:
+_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + + + +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + 
+_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 
1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups 
name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key 
(0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 
1.0, False, False)_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name 
+_get_param_groups: per-parameter group assignment, printed once per rank and interleaved in the log. Deduplicated, the mapping recoverable from this span (parameter names carry the module.module.decoder / module.module.output_layer prefix) is:
+  key (1.0, 1.0, False, False): layers.{1..47}.self_attention.linear_qkv.weight, layers.{1..47}.self_attention.linear_proj.weight, layers.{1..47}.mlp.linear_fc1.weight, layers.{1..47}.mlp.linear_fc2.weight, and output_layer.weight
+  key (0.0, 1.0, False, False): layers.{1..47}.input_layernorm.weight, layers.{1..47}.pre_mlp_layernorm.weight, layers.{1..47}.self_attention.linear_qkv.bias, and decoder.final_layernorm.weight
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 
1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, 
False) + + +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name 
module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, 
False) +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name 
module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups 
name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 
1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name 
+[interleaved _get_param_groups output from multiple ranks; the deduplicated entries below repeat once per decoder layer N = 3 ... 47]
+_get_param_groups name module.module.decoder.layers.N.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key 
(0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key 
(0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, 
False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name 
module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) 
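Downstream, those buckets become optimizer parameter groups. A short usage sketch reusing the get_param_groups sketch shown earlier, with torch.optim.AdamW standing in for whatever distributed optimizer this run actually configures, and a tiny hypothetical module in place of the real GPT + vision-projection model:

    import torch
    from torch import nn

    class TinyBlock(nn.Module):
        # Attribute names chosen so the heuristic above matches them like the real log.
        def __init__(self):
            super().__init__()
            self.input_layernorm = nn.LayerNorm(8)
            self.linear_fc1 = nn.Linear(8, 8)

        def forward(self, x):
            return self.linear_fc1(self.input_layernorm(x))

    model = TinyBlock()
    param_groups = get_param_groups(model.named_parameters())
    optimizer = torch.optim.AdamW(param_groups, lr=1.0e-5, betas=(0.9, 0.95))
    # The group created with weight_decay == 0.0 holds the bias and layernorm tensors,
    # matching the (0.0, 1.0, False, False) entries above; the other group keeps decay
    # for the linear/embedding weights printed with (1.0, 1.0, False, False).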
+_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, 
False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key 
(1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
_get_param_groups output from two processes is interleaved over this stretch of the log: one walks decoder layers 14 through 28 while the other walks layers 22 through 34. Every decoder layer reports the same seven parameters with the same keys:
+_get_param_groups name module.module.decoder.layers.<N>.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.<N>.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
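The key tuples above are how a Megatron-style optimizer setup buckets parameters into param groups; the four fields are most likely (wd_mult, lr_mult, is_expert_parallel, is_decoupled_lr), with weight decay disabled (0.0) for biases and 1-D layernorm weights and enabled (1.0) for all other weights, and both boolean flags False throughout this run. The Python sketch below is a hypothetical reconstruction of that bucketing, not the ModelLink/Megatron source; get_param_groups, its arguments, and the demo model are illustrative assumptions.

# Hypothetical sketch of the bucketing behind the "_get_param_groups name ... key (...)"
# lines in this log; not the actual ModelLink/Megatron implementation.
from collections import defaultdict

import torch.nn as nn


def get_param_groups(model: nn.Module, lr_mult: float = 1.0):
    """Group parameters by a (wd_mult, lr_mult, is_expert_parallel, is_decoupled_lr) key."""
    buckets = defaultdict(list)
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # Biases and 1-D tensors (layernorm weights) typically get no weight decay,
        # which matches the leading 0.0 on *.bias and *layernorm.weight keys in the log.
        no_wd = name.endswith(".bias") or param.dim() == 1
        wd_mult = 0.0 if no_wd else 1.0
        # Both boolean flags are False for every parameter in this run.
        is_expert_parallel = not getattr(param, "allreduce", True)
        is_decoupled_lr = False
        key = (wd_mult, lr_mult, is_expert_parallel, is_decoupled_lr)
        print(f"_get_param_groups name {name} key {key}")
        buckets[key].append(param)
    # One optimizer param group per distinct key.
    return [
        {"params": params, "wd_mult": wd, "lr_mult": lr}
        for (wd, lr, _, _), params in buckets.items()
    ]


if __name__ == "__main__":
    # Tiny stand-in model: layernorm weights and biases land in the wd_mult=0.0 group,
    # linear weights in the wd_mult=1.0 group.
    demo = nn.TransformerEncoderLayer(d_model=16, nhead=2, batch_first=True)
    for group in get_param_groups(demo):
        print(len(group["params"]), "params ->", group["wd_mult"], group["lr_mult"])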
The interleaved _get_param_groups output continues with the same seven-parameter pattern per layer. One process covers decoder layers 35 through 47 and then finishes with:
+_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
The other process covers decoder layers 28 through 45 and is partway through layer 46 where its output resumes, no longer interleaved, below:
+_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0,
False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight 
key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, 
False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, 
False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, 
False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, 
False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
+[Interleaved duplicate output from the other ranks condensed here: the remainder of this span is the same _get_param_groups listing emitted concurrently by two further ranks, covering module.module.external_feature_model.vision_projection.encoder.linear_fc1/linear_fc2.weight, module.module.external_feature_model.pre_proj_layernorm.weight/.bias, module.module.embedding.word_embeddings.weight, and module.module.decoder.layers.0 through decoder.layers.31, with identical key tuples (0.0 for biases and layernorm parameters, 1.0 for weights); the interleaved per-rank output continues below.]
module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, 
False, False) + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, 
False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) +_load_base_checkpoint iteration_load_base_checkpoint iteration_load_base_checkpoint iteration_load_base_checkpoint iteration_load_base_checkpoint iteration_load_base_checkpoint iteration_load_base_checkpoint iteration_load_base_checkpoint iteration 
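For readability, a minimal sketch of how output like the _get_param_groups lines above can arise (editor's illustration, not the actual ModelLink/Megatron-LM code): parameters are bucketed into optimizer groups by a key such as (wd_mult, lr_mult, ...), with biases and layer-norm weights getting wd_mult 0.0 (no weight decay) and all other weights 1.0. The helper name and the meaning of the trailing booleans in the logged key are assumptions.

    # Editor's sketch, not the training code: bucket parameters by (wd_mult, lr_mult).
    def get_param_groups(named_params, no_wd_substrings=("bias", "layernorm")):
        groups = {}
        for name, param in named_params:
            # no weight decay for biases and layer-norm weights, full decay otherwise
            wd_mult = 0.0 if any(s in name.lower() for s in no_wd_substrings) else 1.0
            lr_mult = 1.0
            key = (wd_mult, lr_mult)
            print(f"_get_param_groups name {name} key {key}")
            groups.setdefault(key, []).append(param)
        return [{"params": p, "wd_mult": k[0], "lr_mult": k[1]} for k, p in groups.items()]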
_load_base_checkpoint iteration 1000
_load_base_checkpoint release False
[... the two lines above are printed once per local rank; the raw log interleaves them ...]
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_00/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_01/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_02/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_03/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_04/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_05/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_06/model_optim_rng.pt
_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_07/model_optim_rng.pt
[... each shard path is printed twice in the raw log, once by each rank that loads that mp_rank shard ...]
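The shard paths above follow the pattern <CKPT_LOAD_DIR>/iter_<7-digit iteration>/mp_rank_<2-digit rank>/model_optim_rng.pt. A minimal sketch of assembling such a path (editor's illustration; only the layout is taken from the log, the function name is hypothetical):

    import os

    # Editor's sketch: rebuild the shard path layout seen in the log,
    # e.g. <load_dir>/iter_0001000/mp_rank_06/model_optim_rng.pt
    def checkpoint_shard_path(load_dir, iteration, mp_rank):
        return os.path.join(load_dir,
                            f"iter_{iteration:07d}",
                            f"mp_rank_{mp_rank:02d}",
                            "model_optim_rng.pt")

    # example matching the log: checkpoint_shard_path(CKPT_LOAD_DIR, 1000, 6)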
load_checkpoint iteration 0
load_checkpoint release False
strict True
[... the three lines above are repeated once for each of the 16 local ranks ...]
> rank 375 does not create GPT datasets ...
> rank 382 does not create GPT datasets ...
> rank 369 does not create GPT datasets ...
> rank 381 does not create GPT datasets ...
> rank 383 does not create GPT datasets ...
> rank 377 does not create GPT datasets ...
> rank 371 does not create GPT datasets ...
> rank 372 does not create GPT datasets ...
> rank 378 does not create GPT datasets ...
> rank 370 does not create GPT datasets ...
> rank 379 does not create GPT datasets ...
> rank 374 does not create GPT datasets ...
> rank 380 does not create GPT datasets ...
> rank 376 is creating GPT datasets ...
> rank 373 does not create GPT datasets ...
> rank 368 is creating GPT datasets ...
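Only ranks 368 and 376 build the GPT datasets here, i.e. the first rank of each tensor-parallel group of 8 on this node. A minimal sketch of that kind of gating (editor's inference from the pattern above; the real selection logic in ModelLink/Megatron-LM may use a different condition):

    # Editor's sketch: with tensor-parallel size 8, let only the first rank of each
    # TP group build the datasets; every other rank just logs that it skips it.
    def should_build_datasets(global_rank, tp_size=8):
        return global_rank % tp_size == 0

    for rank in range(368, 384):
        if should_build_datasets(rank):
            print(f"> rank {rank} is creating GPT datasets ...")
        else:
            print(f"> rank {rank} does not create GPT datasets ...")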
target_ratios [(1, 1), (1, 2), (2, 1), (3, 1), (1, 3), (2, 2), (4, 1), (1, 4), (5, 1), (1, 5), (1, 6), (6, 1), (3, 2), (2, 3), (7, 1), (1, 7), (4, 2), (2, 4), (1, 8), (8, 1), (1, 9), (3, 3), (9, 1), (2, 5), (5, 2), (10, 1), (1, 10), (11, 1), (1, 11), (12, 1), (3, 4), (4, 3), (1, 12), (6, 2), (2, 6)]
possible_resolutions [[448, 448], [448, 896], [896, 448], [1344, 448], [448, 1344], [896, 896], [1792, 448], [448, 1792], [2240, 448], [448, 2240], [448, 2688], [2688, 448], [1344, 896], [896, 1344], [3136, 448], [448, 3136], [1792, 896], [896, 1792], [448, 3584], [3584, 448], [448, 4032], [1344, 1344], [4032, 448], [896, 2240], [2240, 896], [4480, 448], [448, 4480], [4928, 448], [448, 4928], [5376, 448], [1344, 1792], [1792, 1344], [448, 5376], [2688, 896], [896, 2688]]
[... both lists are printed once by each dataset-building rank ...]
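The possible_resolutions list above is the target_ratios list scaled by a 448-pixel tile, (w, h) -> [448*w, 448*h]. A minimal sketch of that computation (editor's illustration; the 448 tile size is inferred from the logged values, and only a few ratios are shown):

    # Editor's sketch: derive possible_resolutions from target_ratios as logged above.
    tile = 448
    target_ratios = [(1, 1), (1, 2), (2, 1), (3, 1), (1, 3), (2, 2)]
    possible_resolutions = [[tile * w, tile * h] for (w, h) in target_ratios]
    print(possible_resolutions)
    # [[448, 448], [448, 896], [896, 448], [1344, 448], [448, 1344], [896, 896]]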
[h264 @ 0x556c9bba9b80] mmco: unref short failure
[... "mmco: unref short failure" is reported many more times by different h264 decoder contexts (distinct 0x... addresses) while video samples are decoded; in the raw log these warnings are interleaved with the progress lines below, and each progress line itself is printed twice ...]
processed_samples 100 unjoint_samples 100 joint_samples 0 [197905, 134179]
processed_samples 100 unjoint_samples 100 joint_samples 0 [134741, 133593]
processed_samples 100 unjoint_samples 100 joint_samples 0 [159422, 136193]
processed_samples 100 unjoint_samples 100 joint_samples 0 [117487, 115733]
processed_samples 100 unjoint_samples 100 joint_samples 0 [190378, 170061]
processed_samples 100 unjoint_samples 100 joint_samples 0 [185841, 184225]
processed_samples 100 unjoint_samples 100 joint_samples 0 [208234, 208115]
processed_samples 100 unjoint_samples 100 joint_samples 0 [187071, 187260]
processed_samples 200 unjoint_samples 200 joint_samples 0 [289730, 297302]
processed_samples 200 unjoint_samples 200 joint_samples 0 [305459, 305985]
processed_samples 200 unjoint_samples 200 joint_samples 0 [356626, 349384]
processed_samples 200 unjoint_samples 200 joint_samples 0 [310700, 314602]
processed_samples 200 unjoint_samples 200 joint_samples 0 [436496, 453574]
processed_samples 200 unjoint_samples 200 joint_samples 0 [362580, 362471]
processed_samples 200 unjoint_samples 200 joint_samples 0 [346735, 416241]
processed_samples 200 unjoint_samples 200 joint_samples 0 [327597, 329445]
processed_samples 300 unjoint_samples 300 joint_samples 0 [519491, 520416]
processed_samples 300 unjoint_samples 300 joint_samples 0 [523351, 593806]
processed_samples 300 unjoint_samples 300 joint_samples 0 [473732, 473543]
processed_samples 300 unjoint_samples 300 joint_samples 0 [507390, 505978]
processed_samples 300 unjoint_samples 300 joint_samples 0 [586823, 584007]
processed_samples 300 unjoint_samples 300 joint_samples 0 [523663, 522172]
processed_samples 300 unjoint_samples 300 joint_samples 0 [512039, 512920]
processed_samples 300 unjoint_samples 300 joint_samples 0 [463947, 462098]
processed_samples 400 unjoint_samples 400 joint_samples 0 [666530, 669271]
processed_samples 400 unjoint_samples 400 joint_samples 0 [709094, 709305]
processed_samples 400 unjoint_samples 400 joint_samples 0 [733066, 734166]
processed_samples 400 unjoint_samples 400 joint_samples 0 [746667, 685018]
processed_samples 400 unjoint_samples 400 joint_samples 0 [634494, 682714]
processed_samples 400 unjoint_samples 400 joint_samples 0 [683436, 705532]
processed_samples 400 unjoint_samples 400 joint_samples 0 [579712, 587326]
processed_samples 400 unjoint_samples 400 joint_samples 0 [692885, 694327]
[... further mmco: unref short failure warnings follow ...]
[h264 @
0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55cecfd71980] mmco: unref short failure +[h264 @ 0x55cecfd71980] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecd8b7f40] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short 
failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +processed_samples 500 unjoint_samples 500 joint_samples 0 [775954, 800829] +[h264 @ 0x556c9b6ce740] mmco: unref short failure +processed_samples 500 unjoint_samples 500 joint_samples 0 [775954, 800829] +processed_samples 500 unjoint_samples 500 joint_samples 0 [985893, 927994] +processed_samples 500 unjoint_samples 500 joint_samples 0 [985893, 927994] +processed_samples 500 unjoint_samples 500 joint_samples 0 [866870, 850073] +processed_samples 500 unjoint_samples 500 joint_samples 0 [866870, 850073] +processed_samples 500 unjoint_samples 500 joint_samples 0 [818762, 810718] +processed_samples 500 unjoint_samples 500 joint_samples 0 [818762, 810718] +processed_samples 500 unjoint_samples 500 joint_samples 0 [869264, 867504] +processed_samples 500 unjoint_samples 500 joint_samples 0 [869264, 867504] +processed_samples 500 unjoint_samples 500 joint_samples 0 [884008, 883626] +processed_samples 500 unjoint_samples 500 joint_samples 0 [825296, 823918] +processed_samples 500 unjoint_samples 500 joint_samples 0 [825296, 823918] +processed_samples 500 unjoint_samples 500 joint_samples 0 [884008, 883626] +processed_samples 500 unjoint_samples 500 joint_samples 0 [749195, 761412] +processed_samples 500 unjoint_samples 500 joint_samples 0 [749195, 761412] +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9b6ce740] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +................................................................................................[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref 
short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +processed_samples 600 unjoint_samples 600 joint_samples 0 [933675, 933665] +processed_samples 600 unjoint_samples 600 
joint_samples 0 [1037855, 1037699] +processed_samples 600 unjoint_samples 600 joint_samples 0 [1037855, 1037699] +processed_samples 600 unjoint_samples 600 joint_samples 0 [933675, 933665] +processed_samples 600 unjoint_samples 600 joint_samples 0 [986231, 984233] +processed_samples 600 unjoint_samples 600 joint_samples 0 [986231, 984233] +processed_samples 600 unjoint_samples 600 joint_samples 1 [1015010, 252418] +processed_samples 600 unjoint_samples 600 joint_samples 1 [1015010, 252418] +processed_samples 600 unjoint_samples 600 joint_samples 0 [993624, 992518] +processed_samples 600 unjoint_samples 600 joint_samples 0 [993624, 992518] +processed_samples 600 unjoint_samples 600 joint_samples 0 [938554, 941347] +processed_samples 600 unjoint_samples 600 joint_samples 0 [938554, 941347] +processed_samples 600 unjoint_samples 600 joint_samples 0 [1019225, 1019435] +processed_samples 600 unjoint_samples 600 joint_samples 0 [1022540, 1023523] +processed_samples 600 unjoint_samples 600 joint_samples 0 [1022540, 1023523] +processed_samples 600 unjoint_samples 600 joint_samples 0 [1019225, 1019435] +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short 
failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +processed_samples 700 unjoint_samples 700 joint_samples 1 [201483, 1043567] +processed_samples 700 unjoint_samples 700 joint_samples 1 [201483, 1043567] +processed_samples 700 unjoint_samples 700 joint_samples 1 [186475, 1047431] +processed_samples 700 unjoint_samples 700 joint_samples 1 [186475, 1047431] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1042127, 75349] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1042127, 75349] +processed_samples 700 unjoint_samples 700 joint_samples 1 [146162, 1046795] +processed_samples 700 unjoint_samples 700 joint_samples 1 [168151, 1037213] +processed_samples 700 unjoint_samples 700 joint_samples 1 [146162, 1046795] +processed_samples 700 unjoint_samples 700 joint_samples 1 [168151, 1037213] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1015010, 569497] +processed_samples 700 unjoint_samples 700 joint_samples 1 [255283, 1047109] +processed_samples 700 unjoint_samples 700 joint_samples 1 [255283, 1047109] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1015010, 569497] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1023334, 351204] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1023334, 351204] +[h264 @ 0x556c9b7845c0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 
0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short 
failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +processed_samples 800 unjoint_samples 800 joint_samples 1 [1023334, 673140] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1023334, 673140] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1042127, 407777] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1042127, 407777] +processed_samples 800 unjoint_samples 800 joint_samples 1 [494826, 1046795] +processed_samples 800 unjoint_samples 800 joint_samples 1 [494826, 1046795] +processed_samples 800 unjoint_samples 800 joint_samples 1 [532545, 1043567] +processed_samples 800 unjoint_samples 800 joint_samples 1 [532545, 1043567] +processed_samples 800 unjoint_samples 800 joint_samples 1 [439975, 1047431] +processed_samples 800 unjoint_samples 800 joint_samples 1 [439975, 1047431] +processed_samples 800 unjoint_samples 800 joint_samples 1 [451239, 1037213] +processed_samples 800 unjoint_samples 800 joint_samples 1 [451239, 1037213] +processed_samples 800 unjoint_samples 800 joint_samples 1 [778310, 1047109] +processed_samples 800 unjoint_samples 800 joint_samples 1 [778310, 1047109] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1015010, 877235] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1015010, 877235] +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 
0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +processed_samples 900 unjoint_samples 900 joint_samples 2 [1027517, 66730] +processed_samples 900 unjoint_samples 900 joint_samples 2 [1027517, 66730] +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9b7845c0] mmco: unref short failure +[h264 @ 0x556c9b7845c0] mmco: unref short failure +processed_samples 900 unjoint_samples 900 joint_samples 2 [123915, 1047109] +processed_samples 900 unjoint_samples 900 joint_samples 2 [123915, 1047109] +processed_samples 900 unjoint_samples 900 joint_samples 1 [769222, 1043567] +processed_samples 
900 unjoint_samples 900 joint_samples 1 [769222, 1043567] +processed_samples 900 unjoint_samples 900 joint_samples 1 [1042127, 749269] +processed_samples 900 unjoint_samples 900 joint_samples 1 [1042127, 749269] +processed_samples 900 unjoint_samples 900 joint_samples 1 [800434, 1047431] +processed_samples 900 unjoint_samples 900 joint_samples 1 [800434, 1047431] +processed_samples 900 unjoint_samples 900 joint_samples 1 [712071, 1037213] +processed_samples 900 unjoint_samples 900 joint_samples 1 [712071, 1037213] +processed_samples 900 unjoint_samples 900 joint_samples 2 [52184, 1044984] +processed_samples 900 unjoint_samples 900 joint_samples 2 [52184, 1044984] +processed_samples 900 unjoint_samples 900 joint_samples 1 [792312, 1046795] +processed_samples 900 unjoint_samples 900 joint_samples 1 [792312, 1046795] +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9a9f2b80] mmco: unref short failure +[h264 @ 0x556c9a9f2b80] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] 
mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecc696840] mmco: unref short failure +[h264 @ 0x55cecc696840] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 
0x556c9f138800] mmco: unref short failure +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [298170, 1044984] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [298170, 1044984] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1020195, 160921] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1020195, 160921] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [399546, 1047109] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [399546, 1047109] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1027517, 419914] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1027517, 419914] +processed_samples 1000 unjoint_samples 1000 joint_samples 1 [1045996, 1046094] +processed_samples 1000 unjoint_samples 1000 joint_samples 1 [1042127, 1018024] +processed_samples 1000 unjoint_samples 1000 joint_samples 1 [1045996, 1046094] +processed_samples 1000 unjoint_samples 1000 joint_samples 1 [1042127, 1018024] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [122146, 1046795] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [122146, 1046795] +processed_samples 1000 unjoint_samples 1000 joint_samples 1 [987617, 1037213] +processed_samples 1000 unjoint_samples 1000 joint_samples 1 [987617, 1037213] +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure 
+[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1048193, 298320] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1048193, 298320] +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [553248, 1044984] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [553248, 1044984] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [472147, 1046795] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [472147, 1046795] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [226397, 1047934] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [226397, 1047934] +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1020195, 495485] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [763392, 1047109] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [763392, 1047109] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1020195, 495485] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1046014, 217969] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1046014, 217969] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1027517, 779104] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1027517, 779104] +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short 
failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b623080] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9c773b80] mmco: unref short failure +[h264 @ 0x556c9c773b80] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9eb85640] 
mmco: unref short failure
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1048193, 629289]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1046014, 477683]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [803261, 1044984]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1027517, 1014989]
+processed_samples 1200 unjoint_samples 1200 joint_samples 3 [1040561, 14944]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [653048, 1047934]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [725185, 1046795]
+processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1020195, 798140]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [20742, 1046583]
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [924679, 1047934]
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [1040561, 351866]
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [412877, 1035965]
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [1048193, 977019]
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [1046014, 818102]
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [1002267, 1046795]
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [1046068, 1044984]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1040561, 686967]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [159907, 1042290]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [403690, 1047777]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [245548, 1046795]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [711175, 1035965]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1048020, 336474]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1046481, 289443]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [375511, 1046583]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1048020, 706961]
+processed_samples 1500 unjoint_samples 1500 joint_samples 4 [1046455, 21037]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [661646, 1047777]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [419174, 1042290]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [700648, 1046583]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1046481, 607786]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [588301, 1046795]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1040561, 1015810]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 1600 unjoint_samples 1600 joint_samples 4 [50547, 1037928]
+processed_samples 1600 unjoint_samples 1600 joint_samples 4 [1047304, 258958]
+processed_samples 1600 unjoint_samples 1600 joint_samples 4 [1046455, 354459]
+processed_samples 1600 unjoint_samples 1600 joint_samples 3 [635419, 1042290]
+processed_samples 1600 unjoint_samples 1600 joint_samples 3 [974082, 1046583]
+processed_samples 1600 unjoint_samples 1600 joint_samples 3 [886619, 1047777]
+processed_samples 1600 unjoint_samples 1600 joint_samples 3 [826193, 1046795]
+processed_samples 1600 unjoint_samples 1600 joint_samples 3 [1046481, 924757]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1037406, 146559]
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [464083, 1037928]
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [128969, 1046795]
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [114358, 1047746]
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [211778, 1046583]
+processed_samples 1700 unjoint_samples 1700 joint_samples 3 [1008073, 1042290]
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1047304, 569053]
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1046455, 683276]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 1800 unjoint_samples 1800 joint_samples 5 [65850, 988603]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1047638, 274269]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1037406, 408020]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [510030, 1046583]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [773713, 1037928]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [399036, 1046795]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [551352, 1047746]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1047304, 834636]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 1900 unjoint_samples 1900 joint_samples 5 [1007377, 234734]
+processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1037406, 690076]
+processed_samples 1900 unjoint_samples 1900 joint_samples 5 [82048, 1029094]
+processed_samples 1900 unjoint_samples 1900 joint_samples 5 [308908, 988603]
+processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1047638, 762784]
+processed_samples 1900 unjoint_samples 1900 joint_samples 4 [722417, 1046795]
+processed_samples 1900 unjoint_samples 1900 joint_samples 4 [909102, 1047746]
+processed_samples 1900 unjoint_samples 1900 joint_samples 4 [860586, 1046583]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1029716, 310291]
+processed_samples 2000 unjoint_samples 2000 joint_samples 5 [25471, 1046268]
+processed_samples 2000 unjoint_samples 2000 joint_samples 5 [419386, 1029094]
+processed_samples 2000 unjoint_samples 2000 joint_samples 5 [62625, 1046583]
+processed_samples 2000 unjoint_samples 2000 joint_samples 5 [621476, 988603]
+processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1007377, 551370]
+processed_samples 2000 unjoint_samples 2000 joint_samples 4 [1037406, 974282]
+processed_samples 2000 unjoint_samples 2000 joint_samples 4 [983529, 1046795]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1046863, 252962]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1007377, 885449]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [277149, 1046795]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [430763, 1046268]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [451033, 1046583]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1029716, 661621]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [716867, 1029094]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [993680, 992534]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 2200 unjoint_samples 2200 joint_samples 6 [363477, 1004520]
+processed_samples 2200 unjoint_samples 2200 joint_samples 6 [57651, 1047091]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [731001, 1046268]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1046863, 569867]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1044527, 1032768]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [537919, 1046795]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [821547, 1046583]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1029716, 945981]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 2300 unjoint_samples 2300 joint_samples 6 [19168, 1046764]
+processed_samples 2300 unjoint_samples 2300 joint_samples 6 [274926, 1045138]
+processed_samples 2300 unjoint_samples 2300 joint_samples 6 [162508, 1047431]
+processed_samples 2300 unjoint_samples 2300 joint_samples 6 [728699, 1004520]
+processed_samples 2300 unjoint_samples 2300 joint_samples 6 [283695, 1047091]
+processed_samples 2300 unjoint_samples 2300 joint_samples 5 [813633, 1046795]
+processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1046863, 864326]
+processed_samples 2300 unjoint_samples 2300 joint_samples 6 [1047216, 4426]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+processed_samples 2400 unjoint_samples 2400 joint_samples 7 [1045977, 84936]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1046863, 127817]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1047216, 294107]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [521087, 1045138]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [532988, 1047091]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1048076, 60783]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [370912, 1046764]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [432718, 1047431]
+[h264] mmco: unref short failure (warning repeated many times across multiple decoder contexts)
+[h264 @ 0x55cecf9ebbc0]
mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1046863, 631289] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1047216, 576775] +[h264 @ 0x55cecda179c0] mmco: unref short failure +processed_samples 2500 unjoint_samples 2500 joint_samples 7 [1045977, 436031] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [791244, 1046764] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1048076, 330011] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [742670, 1047431] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [821335, 1045138] +[h264 @ 0x556c9e009040] mmco: unref short failure +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1046863, 631289] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [833680, 1047091] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1047216, 576775] +[h264 @ 0x556c9b93f280] mmco: unref short failure +processed_samples 2500 unjoint_samples 2500 joint_samples 7 [1045977, 436031] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [791244, 1046764] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1048076, 330011] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [742670, 1047431] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [821335, 1045138] +processed_samples 2500 unjoint_samples 2500 joint_samples 6 [833680, 1047091] +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 
0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1045791, 61534] +processed_samples 2600 unjoint_samples 2600 joint_samples 7 
[1045791, 61534] +processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1046863, 913941] +processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1046863, 913941] +processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1007665, 133120] +processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1046415, 120796] +processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1045977, 657583] +processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1045977, 657583] +processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1007665, 133120] +processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1046415, 120796] +processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1048076, 605962] +processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1048076, 605962] +processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1044934, 1047431] +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1044934, 1047431] +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1047216, 963560] +[h264 @ 0x556ca0b64640] mmco: unref short failure +processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1047216, 963560] +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: 
unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [132942, 1046540] +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1045791, 420045] +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1047216, 148883] +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1047182, 234371] +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1007665, 418509] +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1046415, 408082] +processed_samples 2700 unjoint_samples 2700 joint_samples 6 [1048076, 824076] +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [132942, 1046540] +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1045977, 995515] +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1045791, 420045] +[h264 @ 0x55cecedf0f80] mmco: unref 
short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1047216, 148883] +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1047182, 234371] +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1007665, 418509] +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1046415, 408082] +processed_samples 2700 unjoint_samples 2700 joint_samples 6 [1048076, 824076] +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1045977, 995515] +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 
@ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x556c9b6ce740] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short 
failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +processed_samples 2800 unjoint_samples 2800 joint_samples 8 [245889, 1038816] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1047216, 557998] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [460328, 1046540] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1046415, 714845] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1045791, 653381] +processed_samples 2800 unjoint_samples 2800 joint_samples 8 [245889, 1038816] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1045791, 653381] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1047216, 557998] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [460328, 1046540] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1046415, 714845] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1007665, 872957] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1047182, 556913] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1007665, 872957] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1048076, 131384] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1048076, 131384] +processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1047182, 556913] +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc07e5c0] mmco: unref short failure +[h264 @ 0x55cecc07e5c0] mmco: unref short failure +[h264 @ 0x55cecc07e5c0] mmco: unref short failure +[h264 @ 0x55cecc07e5c0] mmco: unref short failure +[h264 @ 0x55cecc07e5c0] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref 
short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +processed_samples 2900 unjoint_samples 2900 joint_samples 8 [564777, 1038816] +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1047216, 854128] +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1048076, 506168] +processed_samples 2900 unjoint_samples 2900 joint_samples 8 [174834, 1030643] +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [779341, 1046540] +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1045791, 1028495] +[h264 @ 0x556ca0b64640] mmco: unref short failure +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1046415, 957478] +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1048076, 506168] +processed_samples 2900 unjoint_samples 2900 joint_samples 8 [174834, 1030643] +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [779341, 1046540] +processed_samples 2900 unjoint_samples 2900 joint_samples 8 [564777, 1038816] +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1047216, 854128] +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1046415, 957478] +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1047182, 832855] +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1045791, 1028495] +processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1047182, 832855] +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: 
unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55ced2b76540] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55ced2b76540] mmco: unref short failure +[h264 @ 0x55ced2b76540] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b623080] mmco: unref short failure +[h264 @ 
0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [36571, 1046540] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [362819, 1045110] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [36571, 1046540] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [362819, 1045110] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [165916, 1025388] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [165916, 1025388] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [451570, 1010607] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [451570, 1010607] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [110079, 1046313] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [110079, 1046313] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [562039, 1030643] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [562039, 1030643] +processed_samples 3000 unjoint_samples 3000 joint_samples 7 [1048076, 763854] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [970591, 1038816] +processed_samples 3000 unjoint_samples 3000 joint_samples 8 [970591, 1038816] +processed_samples 3000 unjoint_samples 3000 joint_samples 7 [1048076, 763854] +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 
@ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecc696840] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short 
failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [529671, 1025388] +processed_samples 3100 unjoint_samples 3100 joint_samples 9 [1030176, 299351] +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [467395, 1046313] +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [368925, 1046540] +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [368925, 1046540] +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [529671, 1025388] +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [670731, 1045110] +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [735698, 1010607] +processed_samples 3100 unjoint_samples 3100 joint_samples 9 [1030176, 299351] +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [998557, 1030643] +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [467395, 1046313] +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [998557, 1030643] +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [735698, 1010607] +processed_samples 3100 unjoint_samples 3100 joint_samples 8 [670731, 1045110] +processed_samples 3100 unjoint_samples 3100 joint_samples 7 [1048076, 1021502] +processed_samples 3100 unjoint_samples 3100 joint_samples 7 [1048076, 1021502] +[h264 @ 0x556c9f58fd00] mmco: unref short failure +[h264 @ 0x556c9f58fd00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecbcbcfc0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecbcbcfc0] mmco: unref short failure +[h264 @ 0x55cecbcbcfc0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref 
short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9e991800] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +processed_samples 3200 unjoint_samples 3200 joint_samples 8 [269728, 1047100] +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1009385, 1045110] +processed_samples 3200 unjoint_samples 3200 joint_samples 9 [227936, 1047199] +processed_samples 3200 unjoint_samples 3200 joint_samples 8 [905461, 1025388] +processed_samples 3200 unjoint_samples 3200 joint_samples 9 [1030176, 554950] +processed_samples 3200 unjoint_samples 3200 joint_samples 8 [689701, 1046540] +processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1032594, 1031798] +processed_samples 3200 unjoint_samples 3200 joint_samples 8 [873635, 1046313] +[h264 @ 0x556c9b93f280] mmco: unref short failure +processed_samples 3200 unjoint_samples 3200 joint_samples 8 [269728, 1047100] +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +processed_samples 3200 unjoint_samples 3200 joint_samples 9 [227936, 1047199] +processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1009385, 1045110] +processed_samples 3200 unjoint_samples 3200 joint_samples 8 
[905461, 1025388] +processed_samples 3200 unjoint_samples 3200 joint_samples 9 [1030176, 554950] +processed_samples 3200 unjoint_samples 3200 joint_samples 8 [689701, 1046540] +processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1032594, 1031798] +processed_samples 3200 unjoint_samples 3200 joint_samples 8 [873635, 1046313] +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cec1144d40] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55ced04fa540] mmco: unref short failure +[h264 @ 0x55ced04fa540] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55ced2b76540] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecbddf640] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 
@ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556ca08317c0] mmco: unref short failure +[h264 @ 0x556ca08317c0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [341408, 1034917] +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [100240, 1046950] +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [345994, 1047825] +processed_samples 3300 unjoint_samples 3300 joint_samples 8 [512690, 1047100] +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [168580, 1031635] +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [483370, 1047199] +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [1030176, 824548] +processed_samples 3300 unjoint_samples 3300 joint_samples 8 [947263, 1046540] +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [341408, 1034917] +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [100240, 1046950] +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [168580, 1031635] +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [345994, 1047825] +processed_samples 3300 unjoint_samples 3300 joint_samples 8 [512690, 1047100] +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [483370, 1047199] +processed_samples 3300 unjoint_samples 3300 joint_samples 9 [1030176, 824548] +processed_samples 3300 unjoint_samples 3300 joint_samples 8 [947263, 1046540] 
+[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: 
unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9f1aaf40] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [636894, 1047825] +processed_samples 3400 unjoint_samples 3400 joint_samples 10 [62364, 1044753] +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [254976, 1046540] +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [355980, 1046950] +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [735883, 1047199] +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [735883, 1047199] +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [636894, 1047825] +processed_samples 3400 unjoint_samples 3400 joint_samples 10 [62364, 1044753] +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [254976, 1046540] +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [438034, 1031635] +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [438034, 1031635] +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [355980, 1046950] +processed_samples 3400 unjoint_samples 3400 joint_samples 8 [773854, 1047100] +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [731021, 1034917] +[h264 @ 0x55cecca039c0] mmco: unref short failure +processed_samples 3400 unjoint_samples 3400 joint_samples 9 [731021, 1034917] +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +processed_samples 3400 unjoint_samples 3400 joint_samples 8 [773854, 1047100] +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] 
mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9b3f8d00] mmco: unref short failure +[h264 @ 0x556c9b3f8d00] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 
0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +processed_samples 3500 unjoint_samples 3500 joint_samples 10 [999083, 109673] +[h264 @ 0x55cecd1dc600] mmco: unref short failure +processed_samples 3500 unjoint_samples 3500 joint_samples 10 [320989, 1044753] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [709668, 1046950] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [626490, 1046540] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [809557, 1031635] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1047326, 1046664] +[h264 @ 0x556c9b61bec0] mmco: unref short failure +processed_samples 3500 unjoint_samples 3500 joint_samples 10 [999083, 109673] +processed_samples 3500 unjoint_samples 3500 joint_samples 10 [320989, 1044753] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [709668, 1046950] +processed_samples 3500 unjoint_samples 3500 joint_samples 8 [1029062, 1047100] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1047326, 1046664] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [626490, 1046540] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [809557, 1031635] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1031177, 1047199] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1031177, 1047199] +processed_samples 3500 unjoint_samples 3500 joint_samples 8 [1029062, 1047100] +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short 
failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecc896080] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecc896080] mmco: unref short failure +[h264 @ 0x55cecc896080] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1037998, 257843] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1047156, 83838] +processed_samples 3600 unjoint_samples 3600 joint_samples 9 [895167, 
1046540] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [547994, 1044753] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [999083, 345600] +processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1047662, 247730] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1047326, 289887] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [547994, 1044753] +processed_samples 3600 unjoint_samples 3600 joint_samples 9 [895167, 1046540] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [999083, 345600] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1037998, 257843] +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1047156, 83838] +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1047662, 247730] +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1047326, 289887] +processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1012547, 1046950] +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1012547, 1046950] +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556ca0002a80] mmco: unref short failure +[h264 @ 0x556ca0002a80] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9cd5d080] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9cd5d080] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 
0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1046521, 271408] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1046521, 271408] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1047156, 335097] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1047156, 335097] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [999083, 618131] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1037998, 582141] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1047326, 631318] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [999083, 618131] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1047326, 631318] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1043634, 146282] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1037998, 582141] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1043634, 146282] +processed_samples 3700 unjoint_samples 3700 joint_samples 9 [1047662, 533620] +processed_samples 3700 unjoint_samples 3700 joint_samples 9 [1047662, 533620] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [885953, 1044753] +processed_samples 3700 unjoint_samples 3700 joint_samples 10 [885953, 1044753] +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556ca0002a80] mmco: unref short 
failure +[h264 @ 0x556ca0002a80] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556ca0002a80] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] 
mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +processed_samples 3800 unjoint_samples 3800 joint_samples 11 [1038294, 146069] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1037998, 870166] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1047156, 550160] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1043634, 508455] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1046521, 573067] +processed_samples 3800 unjoint_samples 3800 joint_samples 11 [1038294, 146069] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [999083, 908601] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1043634, 508455] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1047326, 978606] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1046521, 573067] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1047156, 550160] +processed_samples 3800 unjoint_samples 3800 joint_samples 9 [1047662, 841273] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1037998, 870166] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1047326, 978606] +processed_samples 3800 unjoint_samples 3800 joint_samples 10 [999083, 908601] +processed_samples 3800 unjoint_samples 3800 joint_samples 9 [1047662, 841273] +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55cecc896080] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 
0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1046521, 858141] +[h264 @ 0x556c9f75cb40] mmco: unref short failure +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1046521, 858141] +[h264 @ 0x55cecd1dc600] mmco: unref short failure +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [176084, 1046762] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [176084, 1046762] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [415841, 1040988] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [415841, 1040988] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [1038294, 445137] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [1038294, 445137] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 
[312719, 1005364] +processed_samples 3900 unjoint_samples 3900 joint_samples 11 [312719, 1005364] +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1043634, 787148] +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1043634, 787148] +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [231499, 1046148] +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [231499, 1046148] +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1047156, 890562] +[h264 @ 0x556c9b93f280] mmco: unref short failure +processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1047156, 890562] +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cec1144d40] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b623080] mmco: unref short failure +[h264 @ 0x556c9b623080] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55ceccedbd40] Invalid NAL unit size (1113487007 > 108878). +[h264 @ 0x55ceccedbd40] Error splitting the input into NAL units. +[h264 @ 0x556ca00b4a00] Invalid NAL unit size (1113487007 > 108878). +[h264 @ 0x556ca00b4a00] Error splitting the input into NAL units. 
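
Unlike the "mmco: unref short failure" warnings, the "Invalid NAL unit size ... / Error splitting the input into NAL units" pair just above means libavcodec could not parse the H.264 bitstream for that sample at all, so no frames are returned for it. A common defensive pattern, sketched below under the assumption that the loader exposes a per-sample fetch (load_sample and the retry policy are hypothetical, not this pipeline's actual API), is to catch the decode error and resample rather than crash the dataloader worker.

    import logging
    import random

    logger = logging.getLogger(__name__)

    def load_sample(dataset, index, max_retries=5):
        """Hypothetical defensive wrapper: if the video for `index` cannot be
        decoded (e.g. a corrupt H.264 bitstream, as in the 'Invalid NAL unit
        size' case above), retry with a different random index instead of
        propagating the failure."""
        for attempt in range(max_retries):
            try:
                return dataset[index]        # may raise on a broken video file
            except Exception as exc:         # decoder errors surface as exceptions
                logger.warning("decode failed for sample %d (%s), resampling",
                               index, exc)
                index = random.randrange(len(dataset))
        raise RuntimeError("too many consecutive decode failures")
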
+[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1045796, 78261] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1045796, 78261] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [530581, 1005364] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [530581, 1005364] +processed_samples 4000 unjoint_samples 4000 joint_samples 10 [549810, 1046148] +processed_samples 4000 unjoint_samples 4000 joint_samples 10 [549810, 1046148] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [81223, 1044811] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [81223, 1044811] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 
[172160, 1040530] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [172160, 1040530] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [514695, 1046762] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [514695, 1046762] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [825955, 1040988] +processed_samples 4000 unjoint_samples 4000 joint_samples 11 [825955, 1040988] +processed_samples 4001 unjoint_samples 4000 joint_samples 11 [1038294, 736739] +processed_samples 4001 unjoint_samples 4000 joint_samples 11 [1038294, 736739] +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecd8b7f40] mmco: unref short failure +[h264 @ 0x55cecd8b7f40] mmco: unref short failure +[h264 @ 0x55cecd8b7f40] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecffb9d80] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 
0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1045796, 467721] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1045796, 467721] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [480156, 1040530] +processed_samples 4100 unjoint_samples 4100 joint_samples 12 [105650, 1047050] +processed_samples 4100 unjoint_samples 4100 joint_samples 12 [105650, 1047050] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [480156, 1040530] +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +processed_samples 4101 unjoint_samples 4100 joint_samples 12 [108332, 1036869] +processed_samples 4101 unjoint_samples 4100 joint_samples 12 [108332, 1036869] +processed_samples 4100 unjoint_samples 4100 joint_samples 10 [807770, 1046148] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [469569, 1044811] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [469569, 1044811] +processed_samples 4100 unjoint_samples 4100 joint_samples 10 [807770, 1046148] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [858942, 1005364] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [858942, 1005364] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [820212, 1046762] +processed_samples 4100 unjoint_samples 4100 joint_samples 11 [820212, 1046762] +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref 
short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9ab96640] mmco: unref short failure +[h264 @ 0x556c9ab96640] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [813960, 1044811] +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +processed_samples 4200 unjoint_samples 
4200 joint_samples 12 [1019391, 271812] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [428827, 1047050] +[h264 @ 0x55cecc62f840] mmco: unref short failure +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1045796, 726280] +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [813960, 1044811] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [1019391, 271812] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [428827, 1047050] +[h264 @ 0x556ca011b780] mmco: unref short failure +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1045796, 726280] +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [839855, 1040530] +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1048209, 132610] +processed_samples 4201 unjoint_samples 4200 joint_samples 12 [424767, 1036869] +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [839855, 1040530] +processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1048209, 132610] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [1029003, 29534] +processed_samples 4201 unjoint_samples 4200 joint_samples 12 [424767, 1036869] +processed_samples 4200 unjoint_samples 4200 joint_samples 12 [1029003, 29534] +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b623080] mmco: unref short failure +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecfd71980] mmco: unref short failure +[h264 @ 0x55cecfd71980] mmco: unref short failure +[h264 @ 0x556c9c773b80] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure 
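
The recurring "[h264 @ 0x...] mmco: unref short failure" messages are libavcodec warnings: a memory-management control operation in the H.264 stream refers to a short-term reference picture that is no longer available, which is typical for clips cut mid-GOP or with dropped frames. Decoding still proceeds, so these are noise rather than failures. If the video reader is FFmpeg-backed through PyAV (an assumption; the log does not show which decoder this pipeline uses), the FFmpeg log level can be raised so that only real errors reach the log, as in the sketch below.

    # Minimal sketch, assuming frames are read with PyAV (the 'av' package).
    # Raising the FFmpeg log level hides WARNING-class messages such as
    # "mmco: unref short failure" while still surfacing real decode errors.
    import av
    import av.logging

    av.logging.set_level(av.logging.ERROR)

    def first_frame(path):
        """Decode and return the first video frame of `path` (hypothetical helper)."""
        with av.open(path) as container:
            for frame in container.decode(video=0):
                return frame.to_ndarray(format="rgb24")
        return None
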
+[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [1047025, 99582] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [1019391, 560620] +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [1029003, 370240] +processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1048209, 422954] +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1044951, 1047088] +processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1045796, 1026978] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [665381, 1047050] +[h264 @ 0x55cecce63a00] mmco: unref short failure +processed_samples 4301 unjoint_samples 4300 joint_samples 12 [757854, 1036869] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [1047025, 99582] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [1029003, 370240] +processed_samples 4300 unjoint_samples 4300 
joint_samples 12 [1019391, 560620] +processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1048209, 422954] +processed_samples 4300 unjoint_samples 4300 joint_samples 12 [665381, 1047050] +processed_samples 4301 unjoint_samples 4300 joint_samples 12 [757854, 1036869] +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1044951, 1047088] +processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1045796, 1026978] +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecd8b7f40] mmco: unref short failure +[h264 @ 0x55cecd8b7f40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 
@ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1047496, 282634] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [413554, 1047088] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1019391, 824145] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1047025, 495914] +processed_samples 4401 unjoint_samples 4400 joint_samples 13 [990621, 212164] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1029003, 650413] +processed_samples 4400 unjoint_samples 4400 joint_samples 11 [1048209, 641309] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1047496, 282634] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [413554, 1047088] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1047025, 495914] +processed_samples 4401 unjoint_samples 4400 joint_samples 13 [990621, 212164] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1019391, 824145] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [936413, 1047050] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1029003, 650413] +processed_samples 4400 unjoint_samples 4400 joint_samples 11 [1048209, 641309] +processed_samples 4400 unjoint_samples 4400 joint_samples 12 [936413, 1047050] +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref 
short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1046517, 229068] +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1031855, 121431] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [733032, 1047088] +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1046517, 229068] +processed_samples 4501 unjoint_samples 4500 joint_samples 13 [990621, 672613] +processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1031855, 121431] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1047496, 640984] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1047496, 640984] +processed_samples 4500 unjoint_samples 4500 
joint_samples 12 [733032, 1047088] +processed_samples 4500 unjoint_samples 4500 joint_samples 11 [1048209, 975731] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1029003, 916095] +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +processed_samples 4501 unjoint_samples 4500 joint_samples 13 [990621, 672613] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1047025, 761667] +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1047025, 761667] +processed_samples 4500 unjoint_samples 4500 joint_samples 11 [1048209, 975731] +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1029003, 916095] +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure 
+[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecfd71980] mmco: unref short failure +[h264 @ 0x55cecfd71980] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecfd71980] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1031855, 410903] +[h264 @ 0x55cecf1bf040] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1031855, 410903] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1046517, 551228] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1046517, 551228] +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [303600, 1038356] +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [303600, 1038356] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [164065, 1024368] +processed_samples 4601 unjoint_samples 4600 joint_samples 14 [107683, 997763] +[h264 @ 0x55cecc7f9480] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [160482, 1046059] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [164065, 1024368] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [160482, 1046059] +processed_samples 4601 unjoint_samples 4600 joint_samples 14 [107683, 997763] +[h264 @ 0x556c9b8eee80] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [1047496, 986818] +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [1046253, 1047088] +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [1047496, 986818] +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure 
+processed_samples 4600 unjoint_samples 4600 joint_samples 12 [1046253, 1047088] +[h264 @ 0x55cec1144d40] mmco: unref short failure +[h264 @ 0x55cec1144d40] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cece116400] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecc696840] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecc696840] mmco: unref short failure +[h264 @ 0x55cecc696840] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure 
+[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1047496, 272075] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1048355, 384838] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1031855, 695974] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1048355, 384838] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1047496, 272075] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [460311, 1046059] +processed_samples 4701 unjoint_samples 4700 joint_samples 14 [429686, 997763] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1031855, 695974] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [460311, 1046059] +processed_samples 4701 unjoint_samples 4700 joint_samples 14 [429686, 997763] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [381775, 1024368] +processed_samples 4700 unjoint_samples 4700 joint_samples 12 [555941, 1038356] +processed_samples 4700 unjoint_samples 4700 joint_samples 12 [555941, 1038356] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [381775, 1024368] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1046517, 916212] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1046517, 916212] +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: 
unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +processed_samples 4800 unjoint_samples 4800 joint_samples 14 [1047445, 218533] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1035880, 1035831] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [672104, 1024368] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1047496, 552339] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1048355, 750415] +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +processed_samples 4800 unjoint_samples 4800 joint_samples 12 [1043761, 1040787] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [977065, 1046059] +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +processed_samples 4801 unjoint_samples 4800 joint_samples 14 [714739, 997763] +processed_samples 4800 unjoint_samples 4800 joint_samples 14 [1047445, 218533] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [672104, 1024368] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1035880, 1035831] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1047496, 552339] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1048355, 750415] +processed_samples 4800 unjoint_samples 4800 joint_samples 12 [1043761, 1040787] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [977065, 1046059] +[h264 @ 0x55cecfec0780] mmco: unref short failure +processed_samples 4801 unjoint_samples 4800 joint_samples 14 [714739, 997763] +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 
0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [9684, 1046412] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [9684, 1046412] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [1044593, 251548] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [1044593, 251548] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [182448, 1046059] +processed_samples 4900 unjoint_samples 4900 
joint_samples 14 [182448, 1046059] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [1047445, 539643] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [275599, 1045396] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [275599, 1045396] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1047496, 904962] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1047496, 904962] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [1047445, 539643] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [943010, 1024368] +processed_samples 4901 unjoint_samples 4900 joint_samples 14 [960623, 997763] +processed_samples 4901 unjoint_samples 4900 joint_samples 14 [960623, 997763] +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [943010, 1024368] +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 
0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1047574, 126023] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1047574, 126023] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [409451, 1046412] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1032591, 245190] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [409451, 1046412] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1032591, 245190] +processed_samples 5000 unjoint_samples 5000 joint_samples 13 [636338, 1045396] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1044593, 627910] +processed_samples 5000 unjoint_samples 5000 joint_samples 13 [636338, 1045396] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1044593, 627910] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1047445, 808068] +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [707167, 1046059] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [707167, 1046059] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1047445, 808068] +processed_samples 5001 unjoint_samples 5000 joint_samples 15 [1046843, 177255] +processed_samples 5001 unjoint_samples 5000 joint_samples 15 [1046843, 177255] +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref 
short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9b6ce740] mmco: unref short failure +[h264 @ 0x556c9b6ce740] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9f902240] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1047574, 417533] +processed_samples 5100 unjoint_samples 5100 joint_samples 15 [141323, 1043912] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [728261, 1046412] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1047574, 417533] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1044593, 1015166] +processed_samples 5101 unjoint_samples 5100 joint_samples 15 [1046843, 437553] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1032591, 579651] +processed_samples 5100 unjoint_samples 5100 joint_samples 15 [141323, 1043912] +processed_samples 5100 unjoint_samples 5100 joint_samples 13 [943885, 1045396] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [728261, 1046412] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1044593, 1015166] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [982696, 1046059] +processed_samples 5101 unjoint_samples 5100 joint_samples 15 [1046843, 437553] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1032591, 579651] +processed_samples 5100 unjoint_samples 5100 joint_samples 13 [943885, 1045396] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [982696, 1046059] +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 
0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [285640, 1026728] 
+processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1047574, 754810] +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1047093, 223468] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [492673, 1043912] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [1047185, 220810] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1032591, 915657] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1002546, 1046412] +processed_samples 5201 unjoint_samples 5200 joint_samples 15 [1046843, 653392] +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [285640, 1026728] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1047093, 223468] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [492673, 1043912] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [1047185, 220810] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1032591, 915657] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1002546, 1046412] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1047574, 754810] +processed_samples 5201 unjoint_samples 5200 joint_samples 15 [1046843, 653392] +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x55cecccaa700] mmco: unref short failure +[h264 @ 0x55cecccaa700] mmco: unref short failure +[h264 
@ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556ca08317c0] mmco: unref short failure +[h264 @ 0x556ca08317c0] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [308024, 929868] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [308024, 929868] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [711600, 1026728] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [711600, 1026728] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [780760, 1043912] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [417097, 1038771] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [780760, 1043912] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [417097, 1038771] +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1047185, 605943] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1047185, 605943] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1043018, 217001] +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1047093, 528401] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1043018, 217001] +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1047093, 528401] +processed_samples 5301 unjoint_samples 5300 joint_samples 15 [1046843, 922313] 
+processed_samples 5301 unjoint_samples 5300 joint_samples 15 [1046843, 922313]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 5400 unjoint_samples 5400 joint_samples 16 [46713, 1043912]
+processed_samples 5400 unjoint_samples 5400 joint_samples 15 [741822, 929868]
+processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1043018, 599722]
+processed_samples 5401 unjoint_samples 5400 joint_samples 16 [1046843, 163520]
+processed_samples 5400 unjoint_samples 5400 joint_samples 15 [931640, 1026728]
+processed_samples 5400 unjoint_samples 5400 joint_samples 15 [731583, 1038771]
+processed_samples 5400 unjoint_samples 5400 joint_samples 14 [1047093, 808698]
+processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1047185, 960652]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 5500 unjoint_samples 5500 joint_samples 16 [1044614, 234197]
+processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1016990, 1024578]
+processed_samples 5500 unjoint_samples 5500 joint_samples 16 [1038949, 73140]
+processed_samples 5500 unjoint_samples 5500 joint_samples 15 [184263, 1046174]
+processed_samples 5500 unjoint_samples 5500 joint_samples 16 [229520, 1040348]
+processed_samples 5500 unjoint_samples 5500 joint_samples 16 [419509, 1043912]
+processed_samples 5501 unjoint_samples 5500 joint_samples 16 [1046843, 637023]
+processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1043018, 900610]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [453055, 1026803]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [1044614, 636117]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [1038949, 522671]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [1047480, 126845]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [566888, 1040348]
+processed_samples 5600 unjoint_samples 5600 joint_samples 15 [531817, 1046174]
+processed_samples 5601 unjoint_samples 5600 joint_samples 17 [1046843, 7370]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [760604, 1043912]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [793583, 1026803]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1047480, 459391]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [824994, 1040348]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1044614, 1001329]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1038949, 854032]
+processed_samples 5701 unjoint_samples 5700 joint_samples 17 [1046843, 413688]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1029787, 1043912]
+processed_samples 5700 unjoint_samples 5700 joint_samples 15 [924579, 1046174]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 5800 unjoint_samples 5800 joint_samples 17 [1046910, 200671]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1043191, 1034404]
+processed_samples 5800 unjoint_samples 5800 joint_samples 17 [1041447, 230733]
+processed_samples 5800 unjoint_samples 5800 joint_samples 17 [1030299, 151175]
+processed_samples 5800 unjoint_samples 5800 joint_samples 17 [1038949, 216633]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1036287, 305262]
+processed_samples 5801 unjoint_samples 5800 joint_samples 17 [1046843, 755978]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1047480, 801407]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [1047480, 74754]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [349614, 1041041]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [1046910, 574275]
+processed_samples 5901 unjoint_samples 5900 joint_samples 18 [1046843, 53452]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [1030299, 429357]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [1041447, 557915]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [1038949, 547485]
+processed_samples 5900 unjoint_samples 5900 joint_samples 16 [1036287, 653865]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [701139, 1041041]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1041447, 799334]
+processed_samples 6000 unjoint_samples 6000 joint_samples 18 [31938, 1046589]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1047480, 474922]
+processed_samples 6000 unjoint_samples 6000 joint_samples 16 [1036287, 983449]
+processed_samples 6001 unjoint_samples 6000 joint_samples 18 [1046843, 421598]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1046910, 844864]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1038949, 849814]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 6100 unjoint_samples 6100 joint_samples 18 [444982, 1046589]
+processed_samples 6100 unjoint_samples 6100 joint_samples 18 [95759, 1038863]
+processed_samples 6100 unjoint_samples 6100 joint_samples 18 [53398, 1045520]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [277975, 1047168]
+processed_samples 6100 unjoint_samples 6100 joint_samples 18 [1045817, 64851]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [1047480, 775096]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [1001049, 1041041]
+processed_samples 6101 unjoint_samples 6100 joint_samples 18 [1046843, 741907]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 6200 unjoint_samples 6200 joint_samples 18 [349694, 1041041]
+processed_samples 6200 unjoint_samples 6200 joint_samples 18 [358805, 1045520]
+processed_samples 6200 unjoint_samples 6200 joint_samples 17 [550045, 1047168]
+processed_samples 6200 unjoint_samples 6200 joint_samples 18 [1045817, 298872]
+processed_samples 6200 unjoint_samples 6200 joint_samples 18 [695126, 1046589]
+processed_samples 6200 unjoint_samples 6200 joint_samples 18 [466664, 1038863]
+processed_samples 6200 unjoint_samples 6200 joint_samples 17 [1047480, 1045062]
+processed_samples 6201 unjoint_samples 6200 joint_samples 19 [49738, 1040224]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1045817, 538221]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1047480, 242956]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1001775, 1046589]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [714752, 1041041]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [617659, 1045520]
+processed_samples 6301 unjoint_samples 6300 joint_samples 19 [354293, 1040224]
+processed_samples 6300 unjoint_samples 6300 joint_samples 17 [858044, 1047168]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [753838, 1038863]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 6400 unjoint_samples 6400 joint_samples 19 [1043636, 236895]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1045817, 918970]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1047480, 494566]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [974094, 1041041]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [873890, 1045520]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [71557, 1047168]
+processed_samples 6401 unjoint_samples 6400 joint_samples 19 [792845, 1040224]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1010068, 1038863]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 6500 unjoint_samples 6500 joint_samples 19 [1046624, 72093]
+processed_samples 6500 unjoint_samples 6500 joint_samples 19 [232152, 1047935]
+processed_samples 6500 unjoint_samples 6500 joint_samples 19 [1033947, 258228]
+processed_samples 6501 unjoint_samples 6500 joint_samples 20 [94245, 1042490]
+processed_samples 6500 unjoint_samples 6500 joint_samples 19 [135288, 1042069]
+processed_samples 6500 unjoint_samples 6500 joint_samples 18 [392232, 1047168]
+processed_samples 6500 unjoint_samples 6500 joint_samples 18 [1047480, 802510]
+processed_samples 6500 unjoint_samples 6500 joint_samples 19 [1043636, 602070]
[... repeated h264 decoder warnings: "mmco: unref short failure" ...]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [195565, 1046944]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [434489, 1042069]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [523888, 1047935]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1046624, 452347]
+processed_samples 6601 unjoint_samples 6600 joint_samples 20 [336365, 1042490]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1043636, 892069]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1033947, 579276]
+processed_samples 6600 unjoint_samples 6600 joint_samples 18 [668986, 1047168]
+[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b623080] mmco: unref short failure +[h264 @ 0x556c9b623080] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55cecbcbcfc0] mmco: unref short failure +[h264 @ 0x55cecbcbcfc0] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [421824, 1046944] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1046624, 642450] +[h264 @ 0x55cecf3fb740] mmco: unref short failure +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [855318, 1047935] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [421824, 1046944] +processed_samples 6700 unjoint_samples 6700 joint_samples 20 [122436, 1046909] +[h264 @ 0x556c9f547d00] mmco: unref short failure +processed_samples 6700 unjoint_samples 6700 joint_samples 20 [122436, 1046909] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [855318, 1047935] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1033947, 858054] +processed_samples 6700 unjoint_samples 6700 joint_samples 18 [906494, 1047168] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1046624, 642450] +processed_samples 6700 unjoint_samples 6700 joint_samples 18 [906494, 1047168] +[h264 @ 0x55cecc475dc0] mmco: unref short failure +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [745938, 1042069] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1033947, 858054] +processed_samples 6700 unjoint_samples 6700 joint_samples 19 [745938, 1042069] +processed_samples 6701 unjoint_samples 6700 joint_samples 20 [754161, 1042490] +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +processed_samples 6701 unjoint_samples 6700 joint_samples 20 [754161, 1042490] +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: 
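Each checkpoint above prints the same counter line more than once, apparently because more than one rank writes to this node's log, so the distinct lines are what matter. A minimal sketch, in plain shell and assuming only the line format visible above, for pulling the progress counters out of the log and collapsing the repeats:

#!/usr/bin/env bash
# Minimal sketch, assuming only the "processed_samples ..." line format above.
# Prints each distinct progress line once, prefixed by how many times it appears,
# ordered by the processed_samples counter. Defaults to this node's log file.
LOG=${1:-log_node23.txt}
grep -oE 'processed_samples [0-9]+ unjoint_samples [0-9]+ joint_samples [0-9]+ \[[0-9]+, [0-9]+\]' "$LOG" \
  | sort | uniq -c | sort -k3,3n

Piping through sort | uniq -c both deduplicates the counter lines and reports how many times each one was written.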
+processed_samples 6800 unjoint_samples 6800 joint_samples 20 [1047262, 39942]
+processed_samples 6800 unjoint_samples 6800 joint_samples 20 [1047663, 19687]
+processed_samples 6800 unjoint_samples 6800 joint_samples 20 [48599, 1047935]
+processed_samples 6800 unjoint_samples 6800 joint_samples 19 [132648, 1047168]
+processed_samples 6800 unjoint_samples 6800 joint_samples 20 [497274, 1046909]
+processed_samples 6800 unjoint_samples 6800 joint_samples 19 [724753, 1046944]
+processed_samples 6801 unjoint_samples 6800 joint_samples 21 [1041603, 58006]
+processed_samples 6800 unjoint_samples 6800 joint_samples 19 [1046624, 956654]
+processed_samples 6900 unjoint_samples 6900 joint_samples 20 [988386, 78333]
+processed_samples 6900 unjoint_samples 6900 joint_samples 20 [321359, 1047935]
+processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1047663, 312621]
+processed_samples 6900 unjoint_samples 6900 joint_samples 20 [194260, 1045219]
+processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1047262, 390454]
+processed_samples 6901 unjoint_samples 6900 joint_samples 21 [1041603, 436313]
+processed_samples 6900 unjoint_samples 6900 joint_samples 19 [416972, 1047168]
+processed_samples 6900 unjoint_samples 6900 joint_samples 20 [773377, 1046909]
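The "[h264 @ 0x...] mmco: unref short failure" lines that surround these counters are FFmpeg h264 decoder diagnostics emitted while video clips are decoded for the loader; decoding continues past them. One way to reproduce a decode by hand without that chatter is sketched below; "suspect_clip.mp4" is a placeholder, and whether the training dataloader itself exposes a log-level switch depends on its video backend, which this log does not show.

# Minimal sketch: re-decode one clip by hand with the FFmpeg CLI while keeping the
# h264 warning chatter out of the transcript; "suspect_clip.mp4" is a placeholder.
# "-f null -" decodes the stream and discards the output, so only real errors print.
ffmpeg -hide_banner -loglevel error -i suspect_clip.mp4 -f null -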
+processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1047663, 596418]
+processed_samples 7000 unjoint_samples 7000 joint_samples 21 [110789, 1046909]
+processed_samples 7000 unjoint_samples 7000 joint_samples 20 [988386, 396296]
+processed_samples 7000 unjoint_samples 7000 joint_samples 19 [764748, 1047168]
+processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1047262, 688375]
+processed_samples 7000 unjoint_samples 7000 joint_samples 20 [565125, 1047935]
+processed_samples 7000 unjoint_samples 7000 joint_samples 20 [549623, 1045219]
+processed_samples 7001 unjoint_samples 7000 joint_samples 21 [1041603, 876659]
+processed_samples 7100 unjoint_samples 7100 joint_samples 20 [988386, 711704]
+processed_samples 7100 unjoint_samples 7100 joint_samples 21 [354423, 1046909]
+processed_samples 7101 unjoint_samples 7100 joint_samples 22 [90390, 1047359]
+processed_samples 7100 unjoint_samples 7100 joint_samples 20 [911498, 1045219]
+processed_samples 7100 unjoint_samples 7100 joint_samples 19 [978471, 1047168]
+processed_samples 7100 unjoint_samples 7100 joint_samples 20 [984870, 1047935]
+processed_samples 7100 unjoint_samples 7100 joint_samples 20 [1047262, 978665]
+processed_samples 7100 unjoint_samples 7100 joint_samples 20 [1047663, 956561]
+processed_samples 7200 unjoint_samples 7200 joint_samples 21 [1047111, 237960]
+processed_samples 7200 unjoint_samples 7200 joint_samples 21 [1039308, 218391]
+processed_samples 7200 unjoint_samples 7200 joint_samples 21 [277874, 1019036]
+processed_samples 7200 unjoint_samples 7200 joint_samples 21 [260151, 1047247]
+processed_samples 7201 unjoint_samples 7200 joint_samples 22 [358004, 1047359]
+processed_samples 7200 unjoint_samples 7200 joint_samples 20 [1041041, 342224]
+processed_samples 7200 unjoint_samples 7200 joint_samples 21 [686624, 1046909]
+processed_samples 7200 unjoint_samples 7200 joint_samples 20 [1040528, 1040822]
+processed_samples 7300 unjoint_samples 7300 joint_samples 22 [8292, 1046909]
+processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1047111, 525993]
+processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1046716, 260919]
+processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1039308, 574698]
+processed_samples 7300 unjoint_samples 7300 joint_samples 21 [643626, 1047247]
+processed_samples 7300 unjoint_samples 7300 joint_samples 20 [1041041, 688196]
+processed_samples 7300 unjoint_samples 7300 joint_samples 21 [575682, 1019036]
+processed_samples 7301 unjoint_samples 7300 joint_samples 22 [676234, 1047359]
+[h264 @ 0x55cecc7f9480] illegal short term buffer state detected
+[h264 @ 0x556c9b93f280] illegal short term buffer state detected
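Alongside the usual "mmco: unref short failure" messages, two decoder contexts above also report "illegal short term buffer state detected". A minimal sketch for tallying which decoder contexts (the hex pointers) produce which messages, again assuming only the bracketed log format seen here:

#!/usr/bin/env bash
# Minimal sketch: count h264 decoder messages per decoder context (the hex pointer
# in the bracket), to see whether the failures cluster on a few decoder instances.
LOG=${1:-log_node23.txt}
grep -oE '\[h264 @ 0x[0-9a-f]+\] [a-z: ]+' "$LOG" | sort | uniq -c | sort -rn | head -n 20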
failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1039308, 976356] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1039308, 976356] +processed_samples 7400 unjoint_samples 7400 joint_samples 20 [1041041, 956200] +processed_samples 7400 unjoint_samples 7400 joint_samples 22 [79545, 1033569] +processed_samples 7400 unjoint_samples 7400 joint_samples 22 [334582, 1046909] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1046716, 582796] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1046716, 582796] +processed_samples 7400 unjoint_samples 7400 joint_samples 20 [1041041, 956200] +processed_samples 7400 unjoint_samples 7400 joint_samples 22 [79545, 1033569] +processed_samples 7400 unjoint_samples 7400 joint_samples 22 [334582, 1046909] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1047111, 802078] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [900958, 1047247] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1047111, 802078] +processed_samples 7401 unjoint_samples 7400 joint_samples 22 [1031178, 1047359] +processed_samples 7401 unjoint_samples 7400 joint_samples 22 [1031178, 1047359] +processed_samples 7400 unjoint_samples 7400 joint_samples 21 [900958, 1047247] +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 
0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [113059, 1034334] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [346954, 1023096] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [113059, 1034334] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [346954, 1023096] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [388068, 1033569] +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [174355, 1042017] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [388068, 1033569] +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [174355, 1042017] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [164874, 1047247] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [700343, 1046909] +processed_samples 7501 unjoint_samples 7500 joint_samples 
23 [327476, 1047359] +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1046716, 932268] +processed_samples 7501 unjoint_samples 7500 joint_samples 23 [327476, 1047359] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [700343, 1046909] +processed_samples 7500 unjoint_samples 7500 joint_samples 22 [164874, 1047247] +processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1046716, 932268] +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [991836, 1046909] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [122262, 1044468] 
+processed_samples 7600 unjoint_samples 7600 joint_samples 22 [410018, 1034334] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [672884, 1023096] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [515512, 1047247] +processed_samples 7600 unjoint_samples 7600 joint_samples 21 [488452, 1042017] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [991836, 1046909] +[h264 @ 0x556c9be345c0] mmco: unref short failure +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [122262, 1044468] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [410018, 1034334] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [672884, 1023096] +processed_samples 7601 unjoint_samples 7600 joint_samples 23 [574077, 1047359] +processed_samples 7600 unjoint_samples 7600 joint_samples 21 [488452, 1042017] +[h264 @ 0x55cecda179c0] mmco: unref short failure +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [515512, 1047247] +processed_samples 7601 unjoint_samples 7600 joint_samples 23 [574077, 1047359] +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [721644, 1033569] +[h264 @ 0x556c9aa36f00] mmco: unref short failure +processed_samples 7600 unjoint_samples 7600 joint_samples 22 [721644, 1033569] +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref 
+[h264 @ 0x556c9c09b7c0] mmco: unref short failure
+[h264 @ 0x55cecc99a1c0] mmco: unref short failure
+processed_samples 7700 unjoint_samples 7700 joint_samples 23 [226180, 1046909]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [453110, 1044468]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [950350, 1023096]
+processed_samples 7701 unjoint_samples 7700 joint_samples 23 [910000, 1047359]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [697113, 1034334]
+processed_samples 7700 unjoint_samples 7700 joint_samples 21 [762308, 1042017]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [875974, 1047247]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [1039666, 1041197]
+[h264 @ 0x556c9a9a5580] mmco: unref short failure
+[h264 @ 0x55cecc4988c0] mmco: unref short failure
+processed_samples 7800 unjoint_samples 7800 joint_samples 23 [1044264, 137149]
+processed_samples 7800 unjoint_samples 7800 joint_samples 23 [979221, 361716]
+processed_samples 7801 unjoint_samples 7800 joint_samples 24 [197087, 1047359]
+processed_samples 7800 unjoint_samples 7800 joint_samples 23 [1047082, 213488]
+processed_samples 7800 unjoint_samples 7800 joint_samples 23 [563681, 1046909]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [1015562, 1034334]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [750134, 1044468]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [1046788, 4582]
+[h264 @ 0x556c9b93f280] mmco: unref short failure
+[h264 @ 0x55cecfec0780] mmco: unref short failure
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [1044264, 389597]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [289472, 1046160]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [1047082, 561260]
+processed_samples 7900 unjoint_samples 7900 joint_samples 22 [1046788, 351753]
+processed_samples 7901 unjoint_samples 7900 joint_samples 24 [487625, 1047359]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [1032686, 1046909]
+processed_samples 7900 unjoint_samples 7900 joint_samples 22 [1021853, 1044468]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [979221, 908401]
+[h264 @ 0x55cecda179c0] mmco: unref short failure
+[h264 @ 0x556c9be345c0] mmco: unref short failure
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [616292, 1046160]
+processed_samples 8000 unjoint_samples 8000 joint_samples 24 [103504, 1038211]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [232555, 1047986]
+processed_samples 8000 unjoint_samples 8000 joint_samples 24 [339543, 1046909]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [1047082, 860927]
+processed_samples 8001 unjoint_samples 8000 joint_samples 24 [718436, 1047359]
+processed_samples 8000 unjoint_samples 8000 joint_samples 22 [1046788, 650901]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [1044264, 839667]
+[h264 @ 0x55cecd2579c0] mmco: unref short failure
+[h264 @ 0x556c9f8d0e00] mmco: unref short failure
+processed_samples 8100 unjoint_samples 8100 joint_samples 24 [187754, 1047059]
+processed_samples 8100 unjoint_samples 8100 joint_samples 24 [40536, 1045626]
+processed_samples 8100 unjoint_samples 8100 joint_samples 24 [477802, 1038211]
+processed_samples 8100 unjoint_samples 8100 joint_samples 23 [507456, 1047986]
+processed_samples 8100 unjoint_samples 8100 joint_samples 24 [638131, 1046909]
+processed_samples 8100 unjoint_samples 8100 joint_samples 23 [978386, 1046160]
+processed_samples 8101 unjoint_samples 8100 joint_samples 24 [973752, 1047359]
+processed_samples 8100 unjoint_samples 8100 joint_samples 22 [1046788, 971860]
+[h264 @ 0x556c9f1ff900] mmco: unref short failure
+[h264 @ 0x55cecf9ebbc0] mmco: unref short failure
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [734714, 1038211]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [1042843, 166001]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [419095, 1045626]
+processed_samples 8200 unjoint_samples 8200 joint_samples 23 [870167, 1047986]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [475136, 1047059]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [999410, 1046909]
+processed_samples 8201 unjoint_samples 8200 joint_samples 25 [1046482, 286467]
+processed_samples 8200 unjoint_samples 8200 joint_samples 23 [1047074, 212223]
+[h264 @ 0x55cecbe48180] mmco: unref short failure
+[h264 @ 0x556c9ef3c700] mmco: unref short failure
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1042843, 462669]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1047656, 54754]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [752596, 1045626]
+processed_samples 8300 unjoint_samples 8300 joint_samples 23 [1047074, 505697]
+processed_samples 8301 unjoint_samples 8300 joint_samples 25 [1046482, 649616]
+processed_samples 8300 unjoint_samples 8300 joint_samples 25 [228287, 1046909]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [803995, 1047059]
+processed_samples 8300 unjoint_samples 8300 joint_samples 25 [26893, 1045254]
+[h264 @ 0x556c9b93f280] mmco: unref short failure
+[h264 @ 0x55cecf993980] mmco: unref short failure
+processed_samples 8400 unjoint_samples 8400 joint_samples 25 [333078, 1045254]
+processed_samples 8400 unjoint_samples 8400 joint_samples 24 [1047656, 366926]
+processed_samples 8400 unjoint_samples 8400 joint_samples 24 [1042843, 796299]
+processed_samples 8400 unjoint_samples 8400 joint_samples 25 [561371, 1046909]
+processed_samples 8400 unjoint_samples 8400 joint_samples 24 [1029417, 1045626]
+processed_samples 8400 unjoint_samples 8400 joint_samples 25 [31726, 1047289]
+processed_samples 8401 unjoint_samples 8400 joint_samples 26 [47683, 1046160]
+processed_samples 8400 unjoint_samples 8400 joint_samples 23 [1047074, 804627]
+[h264 @ 0x55cecdedcd40] mmco: unref short failure
+[h264 @ 0x556c9cfa15c0] mmco: unref short failure
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [1047197, 52815]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [783656, 1045254]
+processed_samples 8500 unjoint_samples 8500 joint_samples 24 [166905, 993581]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [1045910, 270435]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [411128, 1047289]
+processed_samples 8501 unjoint_samples 8500 joint_samples 26 [400619, 1046160]
+processed_samples 8500 unjoint_samples 8500 joint_samples 24 [1047656, 727865]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [829177, 1046909]
+[h264 @ 0x55cecf993980] mmco: unref short failure
+[h264 @ 0x556c9c8d4440] mmco: unref short failure
+[mov,mp4,m4a,3gp,3g2,mj2 @ 0x556c9f0b5a00] stream 1, offset 0x1400056: partial file
+[mov,mp4,m4a,3gp,3g2,mj2 @ 0x55cecf9bf680] stream 1, offset 0x1400056: partial file
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [1047197, 448594]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [979946, 1045254]
+processed_samples 8600 unjoint_samples 8600 joint_samples 26 [237013, 1047463]
+processed_samples 8601 unjoint_samples 8600 joint_samples 26 [634880, 1046160]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [1045910, 521060]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [729408, 1047289]
+processed_samples 8601 unjoint_samples 8600 joint_samples 24 [461898, 993581]
+processed_samples 8600 unjoint_samples 8600 joint_samples 24 [1047656, 977812]
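The two [mov,mp4,m4a,3gp,3g2,mj2] "stream 1, offset 0x1400056: partial file" lines above come from FFmpeg's MP4 demuxer rather than the decoder: a sample's byte range extends past the end of the file, which typically means a truncated or incompletely downloaded clip. A minimal sketch for checking whether a suspect clip decodes end to end, assuming Python 3 and an ffmpeg binary on PATH (the input path below is hypothetical):

import subprocess

def decode_check(path: str) -> str:
    """Decode the whole file to a null sink and return any error output."""
    result = subprocess.run(
        ["ffmpeg", "-v", "error", "-i", path, "-f", "null", "-"],
        capture_output=True,
        text=True,
    )
    return result.stderr.strip()

errors = decode_check("datasets/LMM/suspect_clip.mp4")  # hypothetical path
print(errors if errors else "no decode errors reported")

An empty result means the demuxer and decoder got through the whole file; "partial file" or further mmco messages in the output point at the clip itself rather than the training pipeline.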
+[h264 @ 0x55cecca039c0] mmco: unref short failure
+[h264 @ 0x556c9cfa15c0] mmco: unref short failure
+processed_samples 8700 unjoint_samples 8700 joint_samples 26 [1045464, 218442]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [154464, 1046681]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1047197, 674100]
+processed_samples 8700 unjoint_samples 8700 joint_samples 26 [523537, 1047463]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1045910, 837423]
+processed_samples 8701 unjoint_samples 8700 joint_samples 26 [941545, 1046160]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [964842, 1047289]
+processed_samples 8701 unjoint_samples 8700 joint_samples 24 [761690, 993581]
mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1022491, 226902] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1022491, 226902] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [113484, 1037612] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [113484, 1037612] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [848196, 1047463] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1045464, 553397] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1045464, 553397] +processed_samples 8800 unjoint_samples 8800 joint_samples 25 [556269, 1046681] +processed_samples 8800 unjoint_samples 8800 joint_samples 26 [848196, 1047463] +processed_samples 8800 unjoint_samples 8800 joint_samples 25 [556269, 1046681] +processed_samples 8800 unjoint_samples 8800 joint_samples 25 [1047197, 999351] +processed_samples 8801 unjoint_samples 8800 joint_samples 27 [371674, 1046543] +processed_samples 8801 unjoint_samples 8800 joint_samples 27 [371674, 1046543] +processed_samples 8800 unjoint_samples 8800 joint_samples 25 [1047197, 999351] +processed_samples 8801 unjoint_samples 8800 joint_samples 24 [1014387, 1013189] +processed_samples 8801 unjoint_samples 8800 joint_samples 24 [1014387, 1013189] +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure 
+[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9b7845c0] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9e991800] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: 
unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1047197, 224074] +processed_samples 8900 unjoint_samples 8900 joint_samples 27 [1029307, 130326] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [371951, 1037612] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1047197, 224074] +processed_samples 8900 unjoint_samples 8900 joint_samples 27 [1029307, 130326] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1022491, 548375] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1022491, 548375] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [371951, 1037612] +processed_samples 8901 unjoint_samples 8900 joint_samples 25 [205172, 1046452] +processed_samples 8901 unjoint_samples 8900 joint_samples 25 [205172, 1046452] +processed_samples 8900 unjoint_samples 8900 joint_samples 25 [841270, 1046681] +processed_samples 8900 unjoint_samples 8900 joint_samples 25 [841270, 1046681] +processed_samples 8901 unjoint_samples 8900 joint_samples 27 [676091, 1046543] +processed_samples 8901 unjoint_samples 8900 joint_samples 27 [676091, 1046543] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1045464, 1041725] +processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1045464, 1041725] +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 
0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9e991800] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +processed_samples 9000 unjoint_samples 9000 joint_samples 27 [340239, 1047599] +[h264 @ 0x55cecda179c0] mmco: unref short failure +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1047197, 542511] +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [76840, 1046681] +processed_samples 9000 unjoint_samples 9000 joint_samples 27 [1029307, 500092] +[h264 @ 0x556c9b7845c0] mmco: unref short failure +processed_samples 9000 unjoint_samples 9000 joint_samples 27 [340239, 1047599] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [858792, 1037612] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1022491, 822841] +processed_samples 9001 unjoint_samples 9000 joint_samples 25 [447773, 1046452] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1047197, 542511] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [76840, 1046681] +processed_samples 9000 unjoint_samples 9000 joint_samples 27 [1029307, 500092] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [858792, 1037612] +processed_samples 9001 unjoint_samples 9000 joint_samples 27 [957436, 1046543] +processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1022491, 822841] +processed_samples 9001 unjoint_samples 9000 joint_samples 25 [447773, 1046452] +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +processed_samples 9001 unjoint_samples 9000 joint_samples 27 [957436, 1046543] +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref 
short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556ca08317c0] mmco: unref short failure +[h264 @ 0x556ca08317c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 
0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1046832, 55244] +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1018677, 144608] +[h264 @ 0x556c9be345c0] mmco: unref short failure +processed_samples 9100 unjoint_samples 9100 joint_samples 26 [325525, 1046681] +[h264 @ 0x55cecccaa700] mmco: unref short failure +processed_samples 9100 unjoint_samples 9100 joint_samples 26 [1047197, 899130] +[h264 @ 0x55cecccaa700] mmco: unref short failure +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1029307, 767950] +processed_samples 9101 unjoint_samples 9100 joint_samples 28 [1043191, 230440] +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [565306, 1047599] +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1046832, 55244] +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1018677, 144608] +processed_samples 9100 unjoint_samples 9100 joint_samples 26 [325525, 1046681] +processed_samples 9101 unjoint_samples 9100 joint_samples 25 [864873, 1046452] +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [565306, 1047599] +processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1029307, 767950] +[h264 @ 0x556c9b373f40] mmco: unref short failure +processed_samples 9100 unjoint_samples 9100 joint_samples 26 [1047197, 899130] +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +processed_samples 9101 unjoint_samples 9100 joint_samples 28 [1043191, 230440] +[h264 @ 0x556c9b93f280] mmco: unref short failure +processed_samples 9101 unjoint_samples 9100 joint_samples 25 [864873, 1046452] +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref 
short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +processed_samples 9200 unjoint_samples 9200 joint_samples 28 [11343, 1046944] +processed_samples 9200 unjoint_samples 9200 joint_samples 28 [11343, 1046944] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1018677, 540795] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1018677, 540795] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1047620, 227061] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1047620, 227061] +processed_samples 9201 unjoint_samples 9200 joint_samples 26 [1045705, 53486] +processed_samples 9201 unjoint_samples 9200 joint_samples 26 [1045705, 53486] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1046832, 475789] +processed_samples 9200 unjoint_samples 9200 joint_samples 26 [639884, 1046681] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1046832, 475789] +processed_samples 9201 unjoint_samples 9200 joint_samples 28 [1043191, 576947] +processed_samples 9200 unjoint_samples 9200 joint_samples 26 [639884, 1046681] +processed_samples 9201 unjoint_samples 9200 joint_samples 28 [1043191, 576947] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [990829, 1047599] +processed_samples 9200 unjoint_samples 9200 joint_samples 27 [990829, 1047599] +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 
0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecccaa700] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9efa0e80] mmco: unref short failure +[h264 @ 0x556c9efa0e80] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short 
failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1005117, 106803] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1005117, 106803] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1047620, 502750] +processed_samples 9300 unjoint_samples 9300 joint_samples 28 [230672, 1047599] +processed_samples 9300 unjoint_samples 9300 joint_samples 28 [301655, 1046944] +processed_samples 9300 unjoint_samples 9300 joint_samples 28 [230672, 1047599] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1047620, 502750] +processed_samples 9300 unjoint_samples 9300 joint_samples 28 [301655, 1046944] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1018677, 839931] +processed_samples 9301 unjoint_samples 9300 joint_samples 28 [1043191, 921648] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1046832, 773262] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1046832, 773262] +processed_samples 9301 unjoint_samples 9300 joint_samples 28 [1043191, 921648] +processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1018677, 839931] +processed_samples 9301 unjoint_samples 9300 joint_samples 26 [1045705, 313556] +processed_samples 9301 unjoint_samples 9300 joint_samples 26 [1045705, 313556] +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 
0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55ced26cea40] mmco: unref short failure +[h264 @ 0x55ced26cea40] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1005117, 384659] +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [642550, 1047599] +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [108285, 1032494] +processed_samples 9400 unjoint_samples 9400 joint_samples 
27 [1047620, 828560] +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [551011, 1046944] +processed_samples 9401 unjoint_samples 9400 joint_samples 29 [1043191, 152732] +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [642550, 1047599] +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1005117, 384659] +[h264 @ 0x55cecd2a7240] mmco: unref short failure +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [108285, 1032494] +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1047620, 828560] +processed_samples 9400 unjoint_samples 9400 joint_samples 28 [551011, 1046944] +processed_samples 9401 unjoint_samples 9400 joint_samples 29 [1043191, 152732] +processed_samples 9401 unjoint_samples 9400 joint_samples 26 [1045705, 642403] +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1046832, 1029823] +[h264 @ 0x556ca103f1c0] mmco: unref short failure +processed_samples 9401 unjoint_samples 9400 joint_samples 26 [1045705, 642403] +processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1046832, 1029823] +[h264 @ 0x55cecbcbcfc0] mmco: unref short failure +[h264 @ 0x55cecbcbcfc0] mmco: unref short failure +[h264 @ 0x55cecbcbcfc0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 
0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +processed_samples 9500 unjoint_samples 9500 joint_samples 29 [1047577, 3860] +processed_samples 9500 unjoint_samples 9500 joint_samples 29 [1047577, 3860] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1047620, 131615] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1047620, 131615] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [321355, 1045729] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [321355, 1045729] +processed_samples 9501 unjoint_samples 9500 joint_samples 29 [1043191, 442138] +processed_samples 9500 unjoint_samples 9500 joint_samples 27 [1005117, 706638] +processed_samples 9500 unjoint_samples 9500 joint_samples 27 [1005117, 706638] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [477247, 1032494] +processed_samples 9501 unjoint_samples 9500 joint_samples 29 [1043191, 442138] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [477247, 1032494] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [899641, 1046944] +processed_samples 9500 unjoint_samples 9500 joint_samples 28 [899641, 1046944] +processed_samples 9501 unjoint_samples 9500 joint_samples 26 [1045705, 880631] +processed_samples 9501 unjoint_samples 9500 joint_samples 26 [1045705, 880631] +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short 
failure +[h264 @ 0x55cec1164740] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecbddf640] mmco: unref short failure +[h264 @ 0x55cecbddf640] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9f3e3200] 
mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +processed_samples 9600 unjoint_samples 9600 joint_samples 29 [1047577, 289417] +processed_samples 9600 unjoint_samples 9600 joint_samples 29 [1047577, 289417] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1047620, 474739] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1047620, 474739] +processed_samples 9600 unjoint_samples 9600 joint_samples 27 [1005117, 997481] +processed_samples 9600 unjoint_samples 9600 joint_samples 29 [196882, 1046944] +processed_samples 9600 unjoint_samples 9600 joint_samples 29 [196882, 1046944] +processed_samples 9600 unjoint_samples 9600 joint_samples 27 [1005117, 997481] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [776580, 1032494] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [776580, 1032494] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [702887, 1045729] +processed_samples 9600 unjoint_samples 9600 joint_samples 28 [702887, 1045729] +processed_samples 9601 unjoint_samples 9600 joint_samples 27 [86050, 1047717] +processed_samples 9601 unjoint_samples 9600 joint_samples 27 [86050, 1047717] +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +processed_samples 9601 unjoint_samples 9600 joint_samples 29 [1043191, 740130] +processed_samples 9601 unjoint_samples 9600 joint_samples 29 [1043191, 740130] +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 
@ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecbddf640] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short 
failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +processed_samples 9700 unjoint_samples 9700 joint_samples 29 [1047577, 546835] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1047377, 206456] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1047377, 206456] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1047620, 788763] +processed_samples 9700 unjoint_samples 9700 joint_samples 29 [1047577, 546835] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1047620, 788763] +processed_samples 9700 unjoint_samples 9700 joint_samples 29 [465093, 1046944] +processed_samples 9700 unjoint_samples 9700 joint_samples 29 [465093, 1046944] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [942755, 1045729] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [942755, 1045729] +processed_samples 9701 unjoint_samples 9700 joint_samples 27 [383815, 1047717] +processed_samples 9701 unjoint_samples 9700 joint_samples 27 [383815, 1047717] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1013970, 1032494] +processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1013970, 1032494] +processed_samples 9701 unjoint_samples 9700 joint_samples 29 [1043191, 1019256] +processed_samples 9701 unjoint_samples 9700 joint_samples 29 [1043191, 1019256] +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 
+processed_samples 9800 unjoint_samples 9800 joint_samples 29 [1040090, 255952]
+processed_samples 9800 unjoint_samples 9800 joint_samples 28 [1047620, 1004378]
+processed_samples 9800 unjoint_samples 9800 joint_samples 29 [1047541, 368911]
+processed_samples 9800 unjoint_samples 9800 joint_samples 28 [1047377, 541169]
+processed_samples 9801 unjoint_samples 9800 joint_samples 30 [342128, 1023636]
+processed_samples 9800 unjoint_samples 9800 joint_samples 29 [753404, 1046944]
+processed_samples 9800 unjoint_samples 9800 joint_samples 29 [1047577, 826843]
+processed_samples 9801 unjoint_samples 9800 joint_samples 27 [731921, 1047717]
+processed_samples 9900 unjoint_samples 9900 joint_samples 30 [1047577, 125388]
+processed_samples 9900 unjoint_samples 9900 joint_samples 29 [1040090, 504777]
+processed_samples 9900 unjoint_samples 9900 joint_samples 29 [444064, 1042777]
+processed_samples 9900 unjoint_samples 9900 joint_samples 28 [1047377, 888134]
+processed_samples 9901 unjoint_samples 9900 joint_samples 30 [584013, 1023636]
+processed_samples 9900 unjoint_samples 9900 joint_samples 29 [1047541, 778830]
+processed_samples 9901 unjoint_samples 9900 joint_samples 27 [1035212, 1047717]
+processed_samples 9900 unjoint_samples 9900 joint_samples 29 [1031215, 1046944]
+processed_samples 10000 unjoint_samples 10000 joint_samples 29 [155904, 1047327]
+processed_samples 10000 unjoint_samples 10000 joint_samples 29 [882001, 1042777]
+processed_samples 10000 unjoint_samples 10000 joint_samples 30 [1047577, 338141]
+processed_samples 10000 unjoint_samples 10000 joint_samples 30 [83158, 1041366]
+processed_samples 10000 unjoint_samples 10000 joint_samples 30 [1041449, 323679]
+processed_samples 10001 unjoint_samples 10000 joint_samples 28 [1041944, 314453]
+processed_samples 10000 unjoint_samples 10000 joint_samples 29 [1040090, 767920]
+processed_samples 10001 unjoint_samples 10000 joint_samples 30 [849173, 1023636]
+processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1048070, 58931]
+processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1048409, 79047]
+processed_samples 10100 unjoint_samples 10100 joint_samples 29 [432666, 1047327]
+processed_samples 10100 unjoint_samples 10100 joint_samples 30 [380669, 1041366]
+processed_samples 10101 unjoint_samples 10100 joint_samples 31 [1047522, 21786]
+processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1047577, 693943]
+processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1041449, 641137]
+processed_samples 10101 unjoint_samples 10100 joint_samples 28 [1041944, 549291]
+processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1048070, 535473]
+processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1048409, 363681]
+processed_samples 10200 unjoint_samples 10200 joint_samples 29 [747244, 1047327]
+processed_samples 10200 unjoint_samples 10200 joint_samples 30 [826242, 1041366]
+processed_samples 10201 unjoint_samples 10200 joint_samples 31 [1047522, 257475]
+processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1047577, 1015793]
+processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1041449, 885876]
+processed_samples 10201 unjoint_samples 10200 joint_samples 28 [1041944, 854222]
+processed_samples 10300 unjoint_samples 10300 joint_samples 31 [1045857, 117462]
+processed_samples 10300 unjoint_samples 10300 joint_samples 31 [1047753, 43746]
+processed_samples 10300 unjoint_samples 10300 joint_samples 30 [1048409, 637076]
+processed_samples 10300 unjoint_samples 10300 joint_samples 31 [265132, 1047529]
+processed_samples 10300 unjoint_samples 10300 joint_samples 30 [1048070, 877911]
+processed_samples 10301 unjoint_samples 10300 joint_samples 29 [1044212, 174126]
+processed_samples 10300 unjoint_samples 10300 joint_samples 29 [997702, 1047327]
+processed_samples 10301 unjoint_samples 10300 joint_samples 31 [1047522, 749666]
+processed_samples 10400 unjoint_samples 10400 joint_samples 31 [637859, 1047529]
+processed_samples 10400 unjoint_samples 10400 joint_samples 31 [98973, 1046949]
+processed_samples 10400 unjoint_samples 10400 joint_samples 30 [315844, 1047327]
+processed_samples 10401 unjoint_samples 10400 joint_samples 32 [57126, 1046247]
+processed_samples 10400 unjoint_samples 10400 joint_samples 31 [1047753, 247996]
+processed_samples 10400 unjoint_samples 10400 joint_samples 31 [1045857, 394384]
+processed_samples 10401 unjoint_samples 10400 joint_samples 29 [1044212, 588571]
+processed_samples 10400 unjoint_samples 10400 joint_samples 30 [1048409, 938560]
+processed_samples 10500 unjoint_samples 10500 joint_samples 31 [110011, 1047614]
+processed_samples 10500 unjoint_samples 10500 joint_samples 31 [1047753, 562734]
+processed_samples 10501 unjoint_samples 10500 joint_samples 32 [441219, 1046247]
+processed_samples 10500 unjoint_samples 10500 joint_samples 31 [1045857, 666255]
+processed_samples 10500 unjoint_samples 10500 joint_samples 31 [977671, 1047529]
+processed_samples 10500 unjoint_samples 10500 joint_samples 31 [367344, 1046949]
+processed_samples 10500 unjoint_samples 10500 joint_samples 30 [749738, 1047327]
+processed_samples 10501 unjoint_samples 10500 joint_samples 29 [1044212, 930533]
+processed_samples 10600 unjoint_samples 10600 joint_samples 30 [960556, 1047327]
+processed_samples 10600 unjoint_samples 10600 joint_samples 32 [147207, 938133]
+processed_samples 10600 unjoint_samples 10600 joint_samples 32 [191734, 1047529]
+processed_samples 10600 unjoint_samples 10600 joint_samples 31 [495072, 1047614]
+processed_samples 10600 unjoint_samples 10600 joint_samples 31 [695200, 1046949]
+processed_samples 10601 unjoint_samples 10600 joint_samples 30 [244551, 1022690]
+processed_samples 10601 unjoint_samples 10600 joint_samples 32 [673277, 1046247]
+processed_samples 10600 unjoint_samples 10600 joint_samples 31 [1047753, 835523]
+processed_samples 10700 unjoint_samples 10700 joint_samples 32 [450311, 1047529]
+processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1030783, 1046949]
+processed_samples 10700 unjoint_samples 10700 joint_samples 32 [1047753, 160091]
+processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1044661, 260370]
+processed_samples 10700 unjoint_samples 10700 joint_samples 32 [569792, 938133]
+processed_samples 10700 unjoint_samples 10700 joint_samples 31 [734408, 1047614]
+processed_samples 10701 unjoint_samples 10700 joint_samples 30 [635544, 1022690]
+processed_samples 10701 unjoint_samples 10700 joint_samples 32 [999470, 1046247]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1047277, 148911]
+processed_samples 10801 unjoint_samples 10800 joint_samples 33 [1038620, 233148]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1035013, 310522]
+processed_samples 10800 unjoint_samples 10800 joint_samples 31 [1044661, 538308]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1047753, 371203]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [757484, 1047529]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [898480, 938133]
+processed_samples 10801 unjoint_samples 10800 joint_samples 30 [995798, 1022690]
short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +processed_samples 10900 unjoint_samples 10900 joint_samples 33 [1041358, 300953] +processed_samples 10900 unjoint_samples 10900 joint_samples 33 [1041358, 300953] +processed_samples 10900 unjoint_samples 10900 joint_samples 33 [1047393, 48503] +processed_samples 10900 unjoint_samples 10900 joint_samples 33 [1047393, 48503] +processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1047277, 466929] +processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1047277, 466929] +processed_samples 10900 unjoint_samples 10900 joint_samples 31 [1044661, 872335] +processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1035013, 606606] +processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1035013, 606606] +processed_samples 10901 unjoint_samples 10900 joint_samples 31 [223352, 1048382] +processed_samples 10900 unjoint_samples 10900 joint_samples 31 [1044661, 872335] +processed_samples 10901 unjoint_samples 10900 joint_samples 31 [223352, 1048382] +processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1047753, 628737] +processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1047753, 628737] +processed_samples 10901 unjoint_samples 10900 joint_samples 33 [1038620, 505965] +processed_samples 10901 unjoint_samples 10900 joint_samples 33 [1038620, 505965] +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref 
short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1047393, 385166] +processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1041358, 768442] +processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1046894, 123449] +processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1047393, 385166] +processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1041358, 768442] +processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1046894, 123449] +processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1047277, 845315] +processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1035013, 920371] +processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1047277, 845315] +processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1035013, 920371] +processed_samples 11001 unjoint_samples 11000 joint_samples 33 [1038620, 886255] +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +processed_samples 11001 unjoint_samples 11000 joint_samples 33 [1038620, 886255] +processed_samples 11001 unjoint_samples 11000 joint_samples 31 [470671, 1048382] +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1047753, 901431] +[h264 @ 0x55cecd2a7240] mmco: unref short failure +processed_samples 11001 unjoint_samples 11000 joint_samples 31 [470671, 1048382] +processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1047753, 901431] +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: 
unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1043740, 107280] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [365400, 1032816] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1047277, 71354] +processed_samples 11100 unjoint_samples 11100 joint_samples 32 [1046894, 458569] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1043740, 107280] +processed_samples 11100 unjoint_samples 11100 joint_samples 34 [1924, 1047385] +processed_samples 11101 
unjoint_samples 11100 joint_samples 34 [182197, 1026390] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1047393, 854364] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [365400, 1032816] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1047277, 71354] +processed_samples 11100 unjoint_samples 11100 joint_samples 34 [1924, 1047385] +processed_samples 11100 unjoint_samples 11100 joint_samples 32 [1046894, 458569] +processed_samples 11101 unjoint_samples 11100 joint_samples 34 [182197, 1026390] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1047393, 854364] +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +processed_samples 11101 unjoint_samples 11100 joint_samples 31 [804797, 1048382] +processed_samples 11101 unjoint_samples 11100 joint_samples 31 [804797, 1048382] +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x55cec1144d40] mmco: unref short failure +[h264 @ 0x55cec1144d40] mmco: unref short failure +[h264 @ 0x55cec1144d40] mmco: unref short failure +[h264 @ 0x55cec1144d40] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 
0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [1047277, 315802] +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [1047277, 315802] +processed_samples 11200 unjoint_samples 11200 joint_samples 34 [74432, 1040164] +processed_samples 11200 unjoint_samples 11200 joint_samples 34 [74432, 1040164] +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [1043740, 476647] +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [1043740, 476647] +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [668589, 1032816] +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [668589, 1032816] +processed_samples 11201 unjoint_samples 11200 joint_samples 32 [1047003, 30376] +processed_samples 11201 unjoint_samples 11200 joint_samples 32 [1047003, 30376] +processed_samples 11201 unjoint_samples 11200 joint_samples 34 [519824, 1026390] +processed_samples 11200 unjoint_samples 11200 joint_samples 34 [319131, 1047385] +processed_samples 11200 unjoint_samples 11200 joint_samples 34 [319131, 1047385] +processed_samples 11201 unjoint_samples 11200 joint_samples 34 [519824, 1026390] +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +processed_samples 11200 unjoint_samples 11200 joint_samples 32 [1046894, 705785] +processed_samples 11200 unjoint_samples 11200 joint_samples 32 [1046894, 705785] +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 
0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short 
failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +processed_samples 11300 unjoint_samples 11300 joint_samples 34 [386667, 1040164] +processed_samples 11300 unjoint_samples 11300 joint_samples 32 [1046894, 991749] +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [1047277, 611876] +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +processed_samples 11300 unjoint_samples 11300 joint_samples 34 [643009, 1047385] +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [1043740, 709182] +processed_samples 11300 unjoint_samples 11300 joint_samples 34 [386667, 1040164] +processed_samples 11301 unjoint_samples 11300 joint_samples 32 [1047003, 429681] +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [932385, 1032816] +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [1043740, 709182] +processed_samples 11300 unjoint_samples 11300 joint_samples 32 [1046894, 991749] +processed_samples 11300 unjoint_samples 11300 joint_samples 34 [643009, 1047385] +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [1047277, 611876] +processed_samples 11301 unjoint_samples 11300 joint_samples 32 [1047003, 429681] +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [932385, 1032816] +processed_samples 11301 unjoint_samples 11300 joint_samples 34 [873993, 1026390] +processed_samples 11301 unjoint_samples 11300 joint_samples 34 [873993, 1026390] +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref 
short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecf951900] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [701491, 1040164] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [142722, 998360] +[h264 @ 0x556c9d990ec0] mmco: unref short failure +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [343569, 1046749] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [1046459, 157077] +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [1047277, 929060] +[h264 @ 0x55cece1c0e40] mmco: unref short failure +processed_samples 11401 unjoint_samples 11400 joint_samples 35 [149074, 1046603] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [929893, 1047385] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [701491, 1040164] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [142722, 998360] +[h264 @ 0x556ca0b64640] mmco: unref short failure +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [343569, 1046749] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [1046459, 157077] +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [1047277, 929060] +processed_samples 11401 unjoint_samples 11400 joint_samples 35 [149074, 1046603] +processed_samples 11401 unjoint_samples 11400 joint_samples 32 [1047003, 726198] +[h264 @ 0x55cecd2579c0] mmco: unref short failure +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [929893, 1047385] +[h264 @ 0x55cecfd71980] mmco: unref short failure +processed_samples 11401 unjoint_samples 11400 joint_samples 32 [1047003, 726198] +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b6ce740] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref 
short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [397714, 998360] +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1046459, 428375] +processed_samples 11500 unjoint_samples 11500 joint_samples 33 [598164, 1046749] +processed_samples 11500 unjoint_samples 11500 joint_samples 35 [204088, 1047385] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [219530, 1046479] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1032650, 1040164] +processed_samples 11501 unjoint_samples 11500 joint_samples 35 [471667, 1046603] 
+processed_samples 11500 unjoint_samples 11500 joint_samples 33 [598164, 1046749] +processed_samples 11500 unjoint_samples 11500 joint_samples 35 [204088, 1047385] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [219530, 1046479] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1046459, 428375] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1032650, 1040164] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [397714, 998360] +processed_samples 11501 unjoint_samples 11500 joint_samples 32 [1047003, 1031582] +[h264 @ 0x55cecf993980] mmco: unref short failure +processed_samples 11501 unjoint_samples 11500 joint_samples 35 [471667, 1046603] +[h264 @ 0x556c9b93f280] mmco: unref short failure +processed_samples 11501 unjoint_samples 11500 joint_samples 32 [1047003, 1031582] +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b6ce740] mmco: unref short failure +[h264 @ 0x556c9b6ce740] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[mov,mp4,m4a,3gp,3g2,mj2 @ 0x55cecc84dc80] stream 1, offset 0x1400a4d: partial file +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[mov,mp4,m4a,3gp,3g2,mj2 @ 0x556c9f82e880] stream 1, offset 0x1400a4d: partial file +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short 
failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [498264, 1046479] +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1046459, 746233] +processed_samples 11600 unjoint_samples 11600 joint_samples 35 [282290, 1042651] +processed_samples 11600 unjoint_samples 11600 joint_samples 35 [486638, 1047385] +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [694018, 998360] +processed_samples 11601 unjoint_samples 11600 joint_samples 35 [816607, 1046603] +processed_samples 11601 unjoint_samples 11600 joint_samples 33 [1047003, 245399] +processed_samples 11601 unjoint_samples 11600 joint_samples 33 [1005987, 1046749] +[h264 @ 0x55cecca039c0] mmco: unref short failure +processed_samples 11600 unjoint_samples 11600 joint_samples 35 [282290, 1042651] +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [694018, 998360] +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [498264, 1046479] +processed_samples 11600 unjoint_samples 11600 joint_samples 35 [486638, 1047385] +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1046459, 746233] +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +processed_samples 11601 unjoint_samples 11600 joint_samples 33 [1047003, 245399] +processed_samples 11601 unjoint_samples 11600 joint_samples 35 [816607, 1046603] +processed_samples 11601 unjoint_samples 11600 joint_samples 33 [1005987, 1046749] +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref 
short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9f1aaf40] mmco: unref short failure +[h264 @ 0x556c9f1aaf40] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [525846, 1042651] +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [1017116, 1015746] +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [725052, 1047385] +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [74098, 1047840] +processed_samples 11701 unjoint_samples 11700 joint_samples 34 [1038250, 351315] +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [525846, 1042651] +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [725052, 1047385] +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [1017116, 1015746] +[h264 @ 0x55cece1c0e40] 
mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +processed_samples 11701 unjoint_samples 11700 joint_samples 33 [1047003, 535624] +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [74098, 1047840] +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [892987, 1046479] +processed_samples 11701 unjoint_samples 11700 joint_samples 34 [1038250, 351315] +processed_samples 11701 unjoint_samples 11700 joint_samples 36 [775051, 1046603] +processed_samples 11701 unjoint_samples 11700 joint_samples 36 [775051, 1046603] +processed_samples 11701 unjoint_samples 11700 joint_samples 33 [1047003, 535624] +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [892987, 1046479] +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 
0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [407591, 1042820] +[h264 @ 0x556c9b373f40] mmco: unref short failure +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [366716, 1047840] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [152326, 1046956] +processed_samples 11801 unjoint_samples 11800 joint_samples 37 [173216, 1046603] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1047332, 1047385] +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [797108, 1042651] +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +processed_samples 11801 unjoint_samples 11800 joint_samples 33 [1047003, 951337] +processed_samples 11801 unjoint_samples 11800 joint_samples 34 [1038250, 702101] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [407591, 1042820] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [366716, 1047840] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [152326, 1046956] +processed_samples 11801 unjoint_samples 11800 joint_samples 37 [173216, 1046603] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1047332, 1047385] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [797108, 1042651] +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +processed_samples 11801 unjoint_samples 11800 joint_samples 34 [1038250, 702101] +processed_samples 11801 unjoint_samples 11800 joint_samples 33 [1047003, 951337] +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure 
+[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: 
unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +processed_samples 11900 unjoint_samples 11900 joint_samples 36 [1043573, 57603] +processed_samples 11900 unjoint_samples 11900 joint_samples 36 [1047332, 282562] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [568576, 1046956] +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +processed_samples 11901 unjoint_samples 11900 joint_samples 37 [493307, 1046603] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [623486, 1042820] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [672237, 1047840] +processed_samples 11901 unjoint_samples 11900 joint_samples 34 [1047003, 199671] +processed_samples 11901 unjoint_samples 11900 joint_samples 35 [1042355, 25525] +[h264 @ 0x55cecd757f00] mmco: unref short failure +processed_samples 11900 unjoint_samples 11900 joint_samples 36 [1043573, 57603] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [623486, 1042820] +processed_samples 11901 unjoint_samples 11900 joint_samples 37 [493307, 1046603] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [672237, 1047840] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [568576, 1046956] +processed_samples 11900 unjoint_samples 11900 joint_samples 36 [1047332, 282562] +processed_samples 11901 unjoint_samples 11900 joint_samples 34 [1047003, 199671] +processed_samples 11901 unjoint_samples 11900 joint_samples 35 [1042355, 25525] +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9e991800] mmco: unref short failure +[h264 @ 0x556c9e991800] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecfd71980] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: 
unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cec1164740] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1043573, 308040] +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [931499, 1042820] +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1047332, 601346] +processed_samples 12001 unjoint_samples 12000 joint_samples 37 [780871, 1046603] +processed_samples 12001 unjoint_samples 12000 joint_samples 35 [1042355, 444713] +processed_samples 12001 unjoint_samples 12000 joint_samples 34 [1047003, 576668] +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [885210, 1046956] +processed_samples 12000 unjoint_samples 12000 
joint_samples 35 [939710, 1047840] +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1047332, 601346] +processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1043573, 308040] +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [885210, 1046956] +processed_samples 12001 unjoint_samples 12000 joint_samples 37 [780871, 1046603] +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [939710, 1047840] +processed_samples 12001 unjoint_samples 12000 joint_samples 35 [1042355, 444713] +processed_samples 12001 unjoint_samples 12000 joint_samples 34 [1047003, 576668] +processed_samples 12000 unjoint_samples 12000 joint_samples 35 [931499, 1042820] +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 
@ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [994442, 302403] +processed_samples 12101 unjoint_samples 12100 joint_samples 35 [1042355, 787823] +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [186338, 1042820] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1045348, 236755] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1043573, 597397] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [994442, 302403] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1047332, 846323] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1045348, 236755] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [186338, 1042820] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1043573, 597397] +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +processed_samples 12101 unjoint_samples 12100 joint_samples 35 [1042355, 787823] +processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1047332, 846323] +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +processed_samples 12101 unjoint_samples 12100 joint_samples 37 [1021820, 1046603] +processed_samples 12101 unjoint_samples 12100 joint_samples 34 [1047003, 827171] +processed_samples 12101 unjoint_samples 12100 joint_samples 34 [1047003, 827171] +processed_samples 12101 unjoint_samples 12100 joint_samples 37 [1021820, 1046603] +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure 
+[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [577297, 1042820] +processed_samples 12200 unjoint_samples 12200 joint_samples 37 [124350, 1047272] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [994442, 553734] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1045348, 723294] +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +processed_samples 12201 unjoint_samples 12200 joint_samples 35 [1047003, 56409] +processed_samples 12201 unjoint_samples 12200 joint_samples 36 [151547, 1018669] +processed_samples 12201 unjoint_samples 12200 joint_samples 38 [1040031, 495825] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1043573, 884738] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [577297, 1042820] +processed_samples 12200 unjoint_samples 12200 joint_samples 37 [124350, 1047272] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [994442, 553734] +[h264 @ 0x556c9c34cd00] mmco: unref short failure +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1045348, 723294] +[h264 @ 0x55ced0bab780] mmco: unref short failure +processed_samples 12201 unjoint_samples 12200 joint_samples 35 [1047003, 56409] +[h264 @ 0x556c9f1aaf40] mmco: unref short failure +[h264 @ 0x556c9f1aaf40] mmco: unref short 
failure +processed_samples 12201 unjoint_samples 12200 joint_samples 36 [151547, 1018669] +processed_samples 12201 unjoint_samples 12200 joint_samples 38 [1040031, 495825] +processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1043573, 884738] +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x556c9be2a5c0] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref 
short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [1047409, 165272] +[h264 @ 0x55cecce63a00] mmco: unref short failure +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [73028, 1026090] +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [479223, 1047272] +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [909345, 1042820] +processed_samples 12301 unjoint_samples 12300 joint_samples 36 [471063, 1018669] +[h264 @ 0x556c9b806400] mmco: unref short failure +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [994442, 836676] +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [73028, 1026090] +processed_samples 12301 unjoint_samples 12300 joint_samples 35 [1047003, 328507] +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [1047409, 165272] +processed_samples 12300 unjoint_samples 12300 joint_samples 37 [479223, 1047272] +processed_samples 12301 unjoint_samples 12300 joint_samples 38 [1040031, 716869] +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [994442, 836676] +processed_samples 12301 unjoint_samples 12300 joint_samples 36 [471063, 1018669] +processed_samples 12301 unjoint_samples 12300 joint_samples 35 [1047003, 328507] +processed_samples 12300 unjoint_samples 12300 joint_samples 36 [909345, 1042820] +processed_samples 12301 unjoint_samples 12300 joint_samples 38 [1040031, 716869] +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x55cece23d0c0] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref 
short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 
0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9c773b80] mmco: unref short failure +[h264 @ 0x556c9c773b80] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1047769, 123197] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1047769, 123197] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1047409, 420546] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [362184, 1026090] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [1047409, 420546] +processed_samples 12401 unjoint_samples 12400 joint_samples 38 [1040031, 957097] +processed_samples 12401 unjoint_samples 12400 joint_samples 36 [762041, 1018669] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [362184, 1026090] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [738784, 1047272] +processed_samples 12401 unjoint_samples 12400 joint_samples 38 [1040031, 957097] +processed_samples 12400 unjoint_samples 12400 joint_samples 37 [738784, 1047272] +processed_samples 12401 unjoint_samples 12400 joint_samples 36 [762041, 1018669] +processed_samples 12401 unjoint_samples 12400 joint_samples 35 [1047003, 696910] +processed_samples 12400 unjoint_samples 12400 joint_samples 36 [1041138, 1042910] +processed_samples 12401 unjoint_samples 12400 joint_samples 35 [1047003, 696910] +processed_samples 12400 unjoint_samples 12400 joint_samples 36 [1041138, 1042910] +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure 
+[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: 
unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1047769, 379588] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1047769, 379588] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1047409, 714872] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1047409, 714872] +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +processed_samples 12501 unjoint_samples 12500 joint_samples 39 [214038, 1036876] +processed_samples 12501 unjoint_samples 12500 joint_samples 39 [214038, 1036876] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [279013, 1042910] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [279013, 1042910] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [703769, 1026090] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [703769, 1026090] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1046666, 1047272] +processed_samples 12500 unjoint_samples 12500 joint_samples 37 [1046666, 1047272] +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +processed_samples 12501 unjoint_samples 12500 joint_samples 35 [1047003, 997830] +processed_samples 12501 unjoint_samples 12500 joint_samples 35 [1047003, 997830] +processed_samples 12501 unjoint_samples 12500 joint_samples 36 [1048176, 1046824] +processed_samples 12501 unjoint_samples 12500 joint_samples 36 [1048176, 1046824] +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9c34cd00] 
mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1047769, 655502] +processed_samples 12600 unjoint_samples 12600 joint_samples 38 [1046666, 219588] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [591675, 1042910] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [945383, 1026090] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1047409, 987035] +processed_samples 12601 unjoint_samples 12600 joint_samples 37 [306898, 1046824] +processed_samples 12601 unjoint_samples 12600 joint_samples 36 [1047003, 185873] +processed_samples 12601 unjoint_samples 12600 
joint_samples 39 [590042, 1036876] +processed_samples 12600 unjoint_samples 12600 joint_samples 38 [1046666, 219588] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [591675, 1042910] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1047409, 987035] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [1047769, 655502] +processed_samples 12601 unjoint_samples 12600 joint_samples 37 [306898, 1046824] +processed_samples 12601 unjoint_samples 12600 joint_samples 39 [590042, 1036876] +processed_samples 12601 unjoint_samples 12600 joint_samples 36 [1047003, 185873] +processed_samples 12600 unjoint_samples 12600 joint_samples 37 [945383, 1026090] +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cec1144d40] mmco: unref short failure +[h264 @ 0x55cec1144d40] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 
@ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [1001425, 353343] +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [261882, 1042682] +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [1046666, 484523] +processed_samples 12700 unjoint_samples 12700 joint_samples 37 [924761, 1042910] +processed_samples 12701 unjoint_samples 12700 joint_samples 39 [948979, 1036876] +processed_samples 12701 unjoint_samples 12700 joint_samples 36 [1047003, 480355] +processed_samples 12700 unjoint_samples 12700 joint_samples 37 [1047769, 964204] +processed_samples 12701 unjoint_samples 12700 joint_samples 37 [655761, 1046824] +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [1001425, 353343] +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [261882, 1042682] +processed_samples 12700 unjoint_samples 12700 joint_samples 38 [1046666, 484523] +processed_samples 12700 unjoint_samples 12700 joint_samples 37 [924761, 1042910] +processed_samples 12701 unjoint_samples 12700 joint_samples 39 [948979, 1036876] +processed_samples 12701 unjoint_samples 12700 joint_samples 36 [1047003, 480355] +processed_samples 12701 unjoint_samples 12700 joint_samples 37 [655761, 1046824] +processed_samples 12700 unjoint_samples 12700 joint_samples 37 [1047769, 964204] +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure 
+[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556ca36f1f00] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1034246, 144151] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1047866, 244647] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [602299, 1042682] +processed_samples 12801 unjoint_samples 12800 joint_samples 40 [136539, 1047218] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1034246, 144151] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1047866, 244647] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1001425, 673583] +processed_samples 
12800 unjoint_samples 12800 joint_samples 38 [602299, 1042682] +[h264 @ 0x55cecf9986c0] mmco: unref short failure +processed_samples 12801 unjoint_samples 12800 joint_samples 37 [942642, 1046824] +processed_samples 12801 unjoint_samples 12800 joint_samples 40 [136539, 1047218] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1001425, 673583] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1046666, 905746] +processed_samples 12801 unjoint_samples 12800 joint_samples 36 [1047003, 745441] +[h264 @ 0x55cecd757f00] mmco: unref short failure +processed_samples 12801 unjoint_samples 12800 joint_samples 37 [942642, 1046824] +[h264 @ 0x556ca08317c0] mmco: unref short failure +processed_samples 12801 unjoint_samples 12800 joint_samples 36 [1047003, 745441] +processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1046666, 905746] +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x55cece1c4a40] mmco: 
unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecbfd8ac0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecbfd8ac0] mmco: unref short failure +[h264 @ 0x55cecbfd8ac0] mmco: unref short failure +[h264 @ 0x55cecbfd8ac0] mmco: unref short failure +[h264 @ 0x55cecbfd8ac0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1047866, 527904] +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1047866, 527904] +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +processed_samples 12900 unjoint_samples 12900 joint_samples 39 [1046666, 144957] +processed_samples 12900 unjoint_samples 12900 joint_samples 39 [1046666, 144957] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1034246, 479113] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1034246, 479113] +processed_samples 12901 unjoint_samples 12900 joint_samples 37 [39555, 1029965] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1012464, 1011068] +processed_samples 12901 unjoint_samples 12900 joint_samples 37 [39555, 1029965] +processed_samples 12901 unjoint_samples 12900 joint_samples 38 [1043544, 247971] +processed_samples 12901 unjoint_samples 12900 joint_samples 40 [454778, 1047218] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1012464, 1011068] 
+processed_samples 12901 unjoint_samples 12900 joint_samples 38 [1043544, 247971] +processed_samples 12901 unjoint_samples 12900 joint_samples 40 [454778, 1047218] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [839614, 1042682] +processed_samples 12900 unjoint_samples 12900 joint_samples 38 [839614, 1042682] +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556ca08317c0] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecc07e5c0] mmco: unref short failure +[h264 @ 0x55cecc07e5c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 
0x556c9b61bec0] mmco: unref short failure
+processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1048172, 95468]
+processed_samples 13000 unjoint_samples 13000 joint_samples 38 [1047866, 800157]
+processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1047109, 175074]
+processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1046666, 508158]
+processed_samples 13000 unjoint_samples 13000 joint_samples 38 [1034246, 902797]
+processed_samples 13001 unjoint_samples 13000 joint_samples 37 [282046, 1029965]
+processed_samples 13001 unjoint_samples 13000 joint_samples 38 [1043544, 581367]
+processed_samples 13001 unjoint_samples 13000 joint_samples 40 [740145, 1047218]
+processed_samples 13100 unjoint_samples 13100 joint_samples 39 [90715, 1047646]
+processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1048172, 333568]
+processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1046330, 151555]
+processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1047109, 467617]
+processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1046666, 760434]
+processed_samples 13101 unjoint_samples 13100 joint_samples 37 [603529, 1029965]
+processed_samples 13101 unjoint_samples 13100 joint_samples 40 [1047172, 1047218]
+processed_samples 13101 unjoint_samples 13100 joint_samples 38 [1043544, 943660]
+processed_samples 13200 unjoint_samples 13200 joint_samples 39 [450705, 1047646]
+processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1047109, 862816]
+processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1046330, 486633]
+processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1048172, 613021]
+processed_samples 13201 unjoint_samples 13200 joint_samples 39 [173906, 1044981]
+processed_samples 13201 unjoint_samples 13200 joint_samples 41 [1047222, 311874]
+processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1046666, 1030291]
+processed_samples 13201 unjoint_samples 13200 joint_samples 37 [808039, 1029965]
+processed_samples 13300 unjoint_samples 13300 joint_samples 39 [1048172, 890992]
+processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1047109, 194209]
+processed_samples 13300 unjoint_samples 13300 joint_samples 39 [1046330, 748328]
+processed_samples 13300 unjoint_samples 13300 joint_samples 39 [739071, 1047646]
+processed_samples 13301 unjoint_samples 13300 joint_samples 38 [1038573, 128562]
+processed_samples 13300 unjoint_samples 13300 joint_samples 40 [351263, 1043085]
+processed_samples 13301 unjoint_samples 13300 joint_samples 39 [649714, 1044981]
+processed_samples 13301 unjoint_samples 13300 joint_samples 41 [1047222, 629401]
+processed_samples 13400 unjoint_samples 13400 joint_samples 40 [1047109, 464484]
+processed_samples 13400 unjoint_samples 13400 joint_samples 40 [173290, 1039886]
+processed_samples 13400 unjoint_samples 13400 joint_samples 40 [810874, 1043085]
+processed_samples 13400 unjoint_samples 13400 joint_samples 39 [1042102, 1047646]
+processed_samples 13401 unjoint_samples 13400 joint_samples 41 [1047222, 1004562]
+processed_samples 13401 unjoint_samples 13400 joint_samples 39 [982666, 1044981]
+processed_samples 13400 unjoint_samples 13400 joint_samples 40 [53792, 1048102]
+processed_samples 13401 unjoint_samples 13400 joint_samples 38 [1038573, 412540]
+processed_samples 13500 unjoint_samples 13500 joint_samples 40 [454590, 1039886]
+processed_samples 13501 unjoint_samples 13500 joint_samples 42 [1047222, 207936]
+processed_samples 13501 unjoint_samples 13500 joint_samples 40 [264986, 1046855]
+processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1042102, 291582]
+processed_samples 13500 unjoint_samples 13500 joint_samples 41 [59339, 1043085]
+processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1047109, 722337]
+processed_samples 13500 unjoint_samples 13500 joint_samples 40 [327975, 1048102]
+processed_samples 13501 unjoint_samples 13500 joint_samples 38 [1038573, 738751]
+processed_samples 13600 unjoint_samples 13600 joint_samples 40 [693120, 1048102]
+processed_samples 13600 unjoint_samples 13600 joint_samples 40 [783243, 1039886]
+processed_samples 13600 unjoint_samples 13600 joint_samples 41 [1047109, 131613]
+processed_samples 13600 unjoint_samples 13600 joint_samples 41 [414651, 1043085]
+processed_samples 13600 unjoint_samples 13600 joint_samples 40 [1042102, 543104]
+processed_samples 13601 unjoint_samples 13600 joint_samples 40 [658263, 1046855]
+processed_samples 13601 unjoint_samples 13600 joint_samples 42 [1047222, 540325]
+processed_samples 13601 unjoint_samples 13600 joint_samples 39 [5133, 1046400]
+processed_samples 13700 unjoint_samples 13700 joint_samples 40 [1042102, 887613]
+processed_samples 13700 unjoint_samples 13700 joint_samples 41 [1047051, 10060]
+processed_samples 13701 unjoint_samples 13700 joint_samples 39 [303761, 1046400]
+processed_samples 13700 unjoint_samples 13700 joint_samples 41 [1047109, 416912]
+processed_samples 13701 unjoint_samples 13700 joint_samples 42 [1047222, 831953]
+processed_samples 13700 unjoint_samples 13700 joint_samples 41 [759351, 1043085]
+processed_samples 13700 unjoint_samples 13700 joint_samples 40 [950531, 1048102]
+processed_samples 13701 unjoint_samples 13700 joint_samples 40 [971525, 1046855]
+processed_samples 13800 unjoint_samples 13800 joint_samples 42 [14637, 1046673]
+processed_samples 13800 unjoint_samples 13800 joint_samples 41 [1047051, 291360]
+processed_samples 13800 unjoint_samples 13800 joint_samples 41 [1046157, 195216]
+processed_samples 13800 unjoint_samples 13800 joint_samples 41 [130333, 1047456]
+processed_samples 13801 unjoint_samples 13800 joint_samples 43 [1047222, 34194]
+processed_samples 13801 unjoint_samples 13800 joint_samples 41 [186228, 1046855]
+processed_samples 13800 unjoint_samples 13800 joint_samples 41 [1047109, 844290]
+processed_samples 13801 unjoint_samples 13800 joint_samples 39 [590950, 1046400]
+processed_samples 13900 unjoint_samples 13900 joint_samples 41 [455153, 1047456]
+processed_samples 13900 unjoint_samples 13900 joint_samples 42 [1047109, 85060]
+processed_samples 13900 unjoint_samples 13900 joint_samples 42 [363058, 1046673]
+processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1047051, 527934]
+processed_samples 13901 unjoint_samples 13900 joint_samples 41 [482262, 1046855]
+processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1046157, 559295]
+processed_samples 13901 unjoint_samples 13900 joint_samples 43 [1047222, 416497]
+processed_samples 13901 unjoint_samples 13900 joint_samples 39 [907522, 1046400]
+processed_samples 14000 unjoint_samples 14000 joint_samples 41 [756067, 1047456]
+processed_samples 14000 unjoint_samples 14000 joint_samples 41 [1047051, 901513]
+processed_samples 14000 unjoint_samples 14000 joint_samples 42 [1047109, 368734]
+processed_samples 14000 unjoint_samples 14000 joint_samples 41 [1046157, 870459]
+processed_samples 14001 unjoint_samples 14000 joint_samples 40 [1047258, 238438]
+processed_samples 14000 unjoint_samples 14000 joint_samples 42 [609381, 1046673]
+processed_samples 14001 unjoint_samples 14000 joint_samples 43 [1047222, 636595]
+processed_samples 14001 unjoint_samples 14000 joint_samples 41 [751437, 1046855]
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [1045282, 49592]
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [1048166, 141207]
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [1047051, 120266]
+processed_samples 14101 unjoint_samples 14100 joint_samples 42 [11737, 1046855]
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [1047109, 731806]
+processed_samples 14101 unjoint_samples 14100 joint_samples 40 [1047258, 686860]
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [856643, 1046673]
+processed_samples 14101 unjoint_samples 14100 joint_samples 43 [1047222, 1001623]
0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecd533f80] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +processed_samples 14200 unjoint_samples 14200 joint_samples 42 [1045282, 290095] +processed_samples 14200 unjoint_samples 14200 joint_samples 42 [1048166, 487276] +processed_samples 14200 
unjoint_samples 14200 joint_samples 42 [1045282, 290095] +processed_samples 14200 unjoint_samples 14200 joint_samples 42 [1048166, 487276] +processed_samples 14200 unjoint_samples 14200 joint_samples 43 [28836, 1045505] +processed_samples 14200 unjoint_samples 14200 joint_samples 43 [28836, 1045505] +processed_samples 14200 unjoint_samples 14200 joint_samples 43 [1043285, 208164] +processed_samples 14200 unjoint_samples 14200 joint_samples 43 [1043285, 208164] +processed_samples 14201 unjoint_samples 14200 joint_samples 44 [258855, 1046961] +processed_samples 14201 unjoint_samples 14200 joint_samples 44 [258855, 1046961] +processed_samples 14200 unjoint_samples 14200 joint_samples 42 [1047051, 475911] +processed_samples 14201 unjoint_samples 14200 joint_samples 42 [301224, 1046855] +processed_samples 14200 unjoint_samples 14200 joint_samples 42 [1047051, 475911] +processed_samples 14201 unjoint_samples 14200 joint_samples 42 [301224, 1046855] +processed_samples 14201 unjoint_samples 14200 joint_samples 40 [1047258, 1003216] +processed_samples 14201 unjoint_samples 14200 joint_samples 40 [1047258, 1003216] +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: 
unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +processed_samples 14300 unjoint_samples 14300 joint_samples 42 [1045282, 584933] +processed_samples 14300 unjoint_samples 14300 joint_samples 43 [293170, 1045505] +processed_samples 14300 unjoint_samples 14300 joint_samples 42 [1047051, 834325] +processed_samples 14301 unjoint_samples 14300 joint_samples 42 [549502, 1046855] +processed_samples 14301 unjoint_samples 14300 joint_samples 44 [535742, 1046961] +processed_samples 14301 unjoint_samples 14300 joint_samples 41 [1047258, 256045] +processed_samples 14300 unjoint_samples 14300 joint_samples 43 [1043285, 500408] +processed_samples 14300 unjoint_samples 14300 joint_samples 42 [1048166, 823627] +processed_samples 14300 unjoint_samples 14300 joint_samples 42 [1045282, 584933] +processed_samples 14300 unjoint_samples 14300 joint_samples 43 [293170, 1045505] +processed_samples 14300 unjoint_samples 14300 joint_samples 42 [1047051, 834325] +processed_samples 14301 unjoint_samples 14300 joint_samples 44 [535742, 1046961] +processed_samples 14301 unjoint_samples 14300 joint_samples 41 [1047258, 256045] +processed_samples 14300 unjoint_samples 14300 joint_samples 43 [1043285, 500408] +processed_samples 14301 unjoint_samples 14300 joint_samples 42 [549502, 1046855] +processed_samples 14300 unjoint_samples 14300 joint_samples 42 [1048166, 823627] +[h264 @ 0x55cece1c4a40] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc463b00] 
mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +[h264 @ 0x556c9c068800] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +processed_samples 14400 unjoint_samples 14400 joint_samples 43 [86202, 1045010] +processed_samples 14400 unjoint_samples 14400 joint_samples 43 [86202, 1045010] +processed_samples 14400 unjoint_samples 14400 joint_samples 42 [1045282, 892858] +processed_samples 
14400 unjoint_samples 14400 joint_samples 42 [1045282, 892858] +processed_samples 14400 unjoint_samples 14400 joint_samples 43 [6932, 1047406] +processed_samples 14400 unjoint_samples 14400 joint_samples 43 [6932, 1047406] +processed_samples 14400 unjoint_samples 14400 joint_samples 43 [535845, 1045505] +processed_samples 14400 unjoint_samples 14400 joint_samples 43 [535845, 1045505] +processed_samples 14400 unjoint_samples 14400 joint_samples 43 [1043285, 796559] +processed_samples 14400 unjoint_samples 14400 joint_samples 43 [1043285, 796559] +processed_samples 14401 unjoint_samples 14400 joint_samples 42 [803303, 1046855] +processed_samples 14401 unjoint_samples 14400 joint_samples 42 [803303, 1046855] +processed_samples 14401 unjoint_samples 14400 joint_samples 44 [807120, 1046961] +processed_samples 14401 unjoint_samples 14400 joint_samples 44 [807120, 1046961] +processed_samples 14401 unjoint_samples 14400 joint_samples 41 [1047258, 691926] +processed_samples 14401 unjoint_samples 14400 joint_samples 41 [1047258, 691926] +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55ced04fa540] mmco: unref short failure +[h264 @ 0x55ced04fa540] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 
0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +processed_samples 14500 unjoint_samples 14500 joint_samples 44 [131181, 1034444] +processed_samples 14500 unjoint_samples 14500 joint_samples 44 [131181, 1034444] +processed_samples 14500 unjoint_samples 14500 joint_samples 43 [361216, 1047406] +processed_samples 14500 unjoint_samples 14500 joint_samples 43 [209069, 1020980] +processed_samples 14500 unjoint_samples 14500 joint_samples 43 [209069, 1020980] +processed_samples 14500 unjoint_samples 14500 joint_samples 43 [361216, 1047406] +processed_samples 14500 unjoint_samples 14500 joint_samples 43 [566851, 1045010] +processed_samples 14500 unjoint_samples 14500 joint_samples 43 [566851, 1045010] +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +processed_samples 14500 unjoint_samples 14500 joint_samples 43 [803032, 1045505] +processed_samples 14501 unjoint_samples 14500 joint_samples 45 [1044823, 20572] +processed_samples 14501 unjoint_samples 14500 joint_samples 45 [1044823, 20572] +processed_samples 14500 unjoint_samples 14500 joint_samples 43 [803032, 1045505] +processed_samples 14501 unjoint_samples 14500 joint_samples 43 [97715, 1046908] +processed_samples 14501 unjoint_samples 14500 joint_samples 43 [97715, 1046908] +processed_samples 14501 unjoint_samples 14500 joint_samples 41 [1047258, 1047338] +processed_samples 14501 unjoint_samples 14500 joint_samples 41 [1047258, 1047338] +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 
0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecc07e5c0] mmco: unref short failure +[h264 @ 0x55cecc07e5c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +processed_samples 14600 unjoint_samples 14600 joint_samples 43 [561648, 1020980] +processed_samples 14600 unjoint_samples 14600 joint_samples 43 [561648, 1020980] +processed_samples 14600 unjoint_samples 14600 joint_samples 43 [653539, 1047406] +processed_samples 14600 unjoint_samples 14600 joint_samples 43 [653539, 1047406] +processed_samples 14600 unjoint_samples 14600 joint_samples 44 [381920, 1034444] +processed_samples 14600 unjoint_samples 14600 joint_samples 44 [381920, 1034444] +processed_samples 14600 unjoint_samples 14600 joint_samples 44 [1044564, 86825] +processed_samples 14600 unjoint_samples 14600 joint_samples 44 [1044564, 86825] +processed_samples 14601 unjoint_samples 14600 joint_samples 43 [450234, 1046908] +processed_samples 14601 unjoint_samples 14600 joint_samples 43 [450234, 1046908] +processed_samples 14600 unjoint_samples 14600 joint_samples 43 [939352, 1045010] +processed_samples 14601 unjoint_samples 14600 joint_samples 45 [1044823, 368354] +processed_samples 14600 unjoint_samples 14600 joint_samples 43 [939352, 1045010] +processed_samples 14601 unjoint_samples 14600 joint_samples 42 [1047258, 359212] +processed_samples 14601 unjoint_samples 14600 joint_samples 42 [1047258, 359212] +processed_samples 14601 unjoint_samples 14600 joint_samples 45 [1044823, 368354] +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 
0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55ced2b76540] mmco: unref short failure +[h264 @ 0x55ced2b76540] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cec1144d40] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecc07e5c0] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short 
failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [1044564, 334997] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [616537, 1034444] +processed_samples 14700 unjoint_samples 14700 joint_samples 43 [917280, 1020980] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [148109, 1048064] +processed_samples 14701 unjoint_samples 14700 joint_samples 45 [1044823, 667096] +processed_samples 14701 unjoint_samples 14700 joint_samples 43 [671883, 1046908] +processed_samples 14700 unjoint_samples 14700 joint_samples 43 [906980, 1047406] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [1044564, 334997] +processed_samples 14701 unjoint_samples 14700 joint_samples 42 [1047258, 719364] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [616537, 1034444] +processed_samples 14700 unjoint_samples 14700 joint_samples 44 [148109, 1048064] +processed_samples 14701 unjoint_samples 14700 joint_samples 45 [1044823, 667096] +processed_samples 14700 unjoint_samples 14700 joint_samples 43 [917280, 1020980] +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +processed_samples 14701 unjoint_samples 14700 joint_samples 42 [1047258, 719364] +processed_samples 14701 unjoint_samples 14700 joint_samples 43 [671883, 1046908] +processed_samples 14700 unjoint_samples 14700 joint_samples 43 [906980, 1047406] +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55ced2b76540] mmco: unref short failure +[h264 @ 0x55ced2b76540] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref 
short failure +[h264 @ 0x556c9b64a1c0] mmco: unref short failure +[h264 @ 0x556c9b64a1c0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [172243, 1030988] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [172243, 1030988] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [391354, 1048064] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [391354, 1048064] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1029066, 201120] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1029066, 201120] +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1044564, 650770] +processed_samples 14800 unjoint_samples 14800 
joint_samples 44 [1044564, 650770] +processed_samples 14801 unjoint_samples 14800 joint_samples 43 [86060, 1036571] +processed_samples 14801 unjoint_samples 14800 joint_samples 43 [86060, 1036571] +processed_samples 14801 unjoint_samples 14800 joint_samples 43 [947975, 1046908] +processed_samples 14801 unjoint_samples 14800 joint_samples 43 [947975, 1046908] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1024670, 1034444] +processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1024670, 1034444] +processed_samples 14801 unjoint_samples 14800 joint_samples 45 [1044823, 986503] +processed_samples 14801 unjoint_samples 14800 joint_samples 45 [1044823, 986503] +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecbddf640] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 
@ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [707538, 1048064] +processed_samples 14900 unjoint_samples 14900 joint_samples 45 [1047224, 301435] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [1029066, 481964] +processed_samples 14900 unjoint_samples 14900 joint_samples 45 [1047224, 301435] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [1029066, 481964] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [422413, 1030988] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [707538, 1048064] +processed_samples 14901 unjoint_samples 14900 joint_samples 44 [195617, 1046908] +processed_samples 14901 unjoint_samples 14900 joint_samples 46 [248893, 1046757] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [422413, 1030988] +processed_samples 14901 unjoint_samples 14900 joint_samples 46 [248893, 1046757] +[h264 @ 0x556c9be4ff40] mmco: unref short failure +processed_samples 14901 unjoint_samples 14900 joint_samples 44 [195617, 1046908] +[h264 @ 0x55cecc3fe200] mmco: unref short failure +processed_samples 14901 unjoint_samples 14900 joint_samples 43 [355368, 1036571] +processed_samples 14901 unjoint_samples 14900 joint_samples 43 [355368, 1036571] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [1044564, 984132] +processed_samples 14900 unjoint_samples 14900 joint_samples 44 [1044564, 984132] +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure 
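The repeated "[h264 @ 0x...] mmco: unref short failure" lines above come from FFmpeg's H.264 decoder while the dataloader decodes video clips: the bitstream's memory-management control operations (mmco) try to unreference a short-term picture the decoder no longer holds. They are warnings, not fatal errors, and decoding continues. If the decode path goes through PyAV (an assumption about this pipeline), they can be silenced with av.logging.set_level(av.logging.ERROR); the ffmpeg CLI equivalent is -loglevel error. The "processed_samples N unjoint_samples N joint_samples M [a, b]" lines report dataloader progress and are apparently emitted by more than one rank, which is why each one appears twice. Below is a minimal sketch for summarizing a log like this one; it assumes only Python 3 and the standard library, and the helper name summarize is hypothetical, not part of the training code.

    import re
    from collections import Counter

    # Matches the dataloader progress lines seen in this log, e.g.
    # "processed_samples 14100 unjoint_samples 14100 joint_samples 42 [1045282, 49592]"
    PROGRESS_RE = re.compile(
        r"processed_samples (\d+) unjoint_samples (\d+) joint_samples (\d+) \[(\d+), (\d+)\]"
    )

    def summarize(log_path):
        h264_warnings = 0
        steps = Counter()  # processed_samples value -> number of times it was printed
        with open(log_path, errors="replace") as f:
            for line in f:
                # Count decoder warnings without treating them as errors.
                h264_warnings += line.count("mmco: unref short failure")
                m = PROGRESS_RE.search(line)
                if m:
                    steps[int(m.group(1))] += 1
        latest = max(steps) if steps else 0
        return {"h264_warnings": h264_warnings, "latest_processed_samples": latest}

    # Example: summarize("log_node23.txt")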
+[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecbddf640] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +processed_samples 15000 unjoint_samples 15000 joint_samples 44 [702411, 1030988] +processed_samples 15000 unjoint_samples 15000 joint_samples 45 [1047224, 657758] +processed_samples 15000 unjoint_samples 15000 joint_samples 44 [702411, 1030988] +processed_samples 15000 unjoint_samples 15000 joint_samples 45 [1046691, 327170] +processed_samples 15000 unjoint_samples 15000 joint_samples 45 [1047224, 657758] +processed_samples 15000 unjoint_samples 15000 joint_samples 44 [1019438, 1048064] +processed_samples 15000 unjoint_samples 15000 joint_samples 44 [1029066, 875795] +processed_samples 15000 unjoint_samples 15000 joint_samples 45 [1046691, 327170] +processed_samples 15001 unjoint_samples 15000 joint_samples 44 [602692, 1046908] +processed_samples 15001 unjoint_samples 15000 joint_samples 
46 [646981, 1046757] +processed_samples 15001 unjoint_samples 15000 joint_samples 43 [608980, 1036571] +processed_samples 15000 unjoint_samples 15000 joint_samples 44 [1019438, 1048064] +processed_samples 15000 unjoint_samples 15000 joint_samples 44 [1029066, 875795] +processed_samples 15001 unjoint_samples 15000 joint_samples 44 [602692, 1046908] +processed_samples 15001 unjoint_samples 15000 joint_samples 43 [608980, 1036571] +processed_samples 15001 unjoint_samples 15000 joint_samples 46 [646981, 1046757] +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc696840] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: 
unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cec1164740] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1046395, 62544] +processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1046527, 268716] +processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1046691, 636595] +processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1047224, 902550] +processed_samples 15101 unjoint_samples 15100 joint_samples 43 [1018134, 1036571] +processed_samples 15100 unjoint_samples 15100 joint_samples 44 [982965, 1030988] +processed_samples 15101 unjoint_samples 15100 joint_samples 44 [853466, 1046908] +[h264 @ 0x556c9fa49a00] mmco: unref short failure +processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1046395, 62544] +processed_samples 15101 unjoint_samples 15100 joint_samples 46 [947999, 1046757] +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1046527, 268716] +processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1046691, 636595] +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1047224, 902550] +processed_samples 15100 unjoint_samples 15100 joint_samples 44 [982965, 1030988] +processed_samples 15101 unjoint_samples 15100 joint_samples 44 [853466, 1046908] +processed_samples 15101 unjoint_samples 15100 joint_samples 43 [1018134, 1036571] +processed_samples 15101 unjoint_samples 15100 joint_samples 46 [947999, 1046757] +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecd2579c0] 
mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f2c6800] mmco: unref short failure +[h264 @ 0x556c9f2c6800] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +processed_samples 15200 unjoint_samples 15200 joint_samples 45 [116305, 1045508] +processed_samples 15200 unjoint_samples 15200 joint_samples 46 [1047224, 211644] +processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1046395, 331396] +processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1046527, 577854] +processed_samples 15201 unjoint_samples 15200 joint_samples 45 [986883, 216689] +processed_samples 15201 unjoint_samples 15200 joint_samples 47 [231977, 1046757] +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +processed_samples 15201 unjoint_samples 15200 joint_samples 44 [1042743, 321834] +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +processed_samples 15200 unjoint_samples 15200 
joint_samples 45 [1046691, 913308] +[h264 @ 0x556c9e009040] mmco: unref short failure +processed_samples 15200 unjoint_samples 15200 joint_samples 45 [116305, 1045508] +processed_samples 15200 unjoint_samples 15200 joint_samples 46 [1047224, 211644] +processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1046395, 331396] +processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1046527, 577854] +processed_samples 15201 unjoint_samples 15200 joint_samples 45 [986883, 216689] +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +[h264 @ 0x556c9c1f57c0] mmco: unref short failure +processed_samples 15201 unjoint_samples 15200 joint_samples 47 [231977, 1046757] +processed_samples 15201 unjoint_samples 15200 joint_samples 44 [1042743, 321834] +processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1046691, 913308] +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc896080] mmco: unref short failure +[h264 @ 0x55cecc896080] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 
0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +processed_samples 15300 unjoint_samples 15300 joint_samples 45 [374911, 1045508] +processed_samples 15300 unjoint_samples 15300 joint_samples 46 [67831, 1046938] +processed_samples 15300 unjoint_samples 15300 joint_samples 46 [1047224, 517525] +processed_samples 15300 unjoint_samples 15300 joint_samples 45 [1046395, 646822] +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +processed_samples 15301 unjoint_samples 15300 joint_samples 45 [986883, 461460] +processed_samples 15301 unjoint_samples 15300 joint_samples 47 [660634, 1046757] +processed_samples 15301 unjoint_samples 15300 joint_samples 44 [1042743, 636324] +processed_samples 15300 unjoint_samples 15300 joint_samples 45 [1046527, 953444] +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +processed_samples 15300 unjoint_samples 15300 joint_samples 46 [67831, 1046938] +processed_samples 15300 unjoint_samples 15300 joint_samples 45 [374911, 1045508] +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +processed_samples 15300 unjoint_samples 15300 joint_samples 45 [1046395, 646822] +processed_samples 15300 unjoint_samples 15300 joint_samples 46 [1047224, 517525] +[h264 @ 0x556c9b5cb880] mmco: unref short failure +processed_samples 15301 unjoint_samples 15300 joint_samples 45 [986883, 461460] +processed_samples 15301 unjoint_samples 15300 joint_samples 47 [660634, 1046757] +processed_samples 15300 unjoint_samples 15300 joint_samples 45 [1046527, 953444] +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +processed_samples 15301 unjoint_samples 15300 joint_samples 44 [1042743, 636324] +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 
0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short 
failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +processed_samples 15400 unjoint_samples 15400 joint_samples 46 [174535, 1038890] +processed_samples 15400 unjoint_samples 15400 joint_samples 45 [763233, 1045508] +processed_samples 15400 unjoint_samples 15400 joint_samples 46 [359506, 1046938] +processed_samples 15400 unjoint_samples 15400 joint_samples 46 [174535, 1038890] +processed_samples 15400 unjoint_samples 15400 joint_samples 45 [763233, 1045508] +processed_samples 15400 unjoint_samples 15400 joint_samples 46 [1047224, 816585] +processed_samples 15401 unjoint_samples 15400 joint_samples 45 [986883, 766810] +processed_samples 15400 unjoint_samples 15400 joint_samples 46 [359506, 1046938] +processed_samples 15401 unjoint_samples 15400 joint_samples 45 [1046632, 79533] +processed_samples 15401 unjoint_samples 15400 joint_samples 47 [929317, 1046757] +processed_samples 15400 unjoint_samples 15400 joint_samples 46 [1047224, 816585] +processed_samples 15400 unjoint_samples 15400 joint_samples 45 [1046395, 976230] +processed_samples 15401 unjoint_samples 15400 joint_samples 45 [1046632, 79533] +processed_samples 15401 unjoint_samples 15400 joint_samples 45 [986883, 766810] +processed_samples 15401 unjoint_samples 15400 joint_samples 47 [929317, 1046757] +processed_samples 15400 unjoint_samples 15400 joint_samples 45 [1046395, 976230] +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cec1164740] mmco: unref short failure +[h264 @ 0x55cec1164740] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short 
failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556ca08317c0] mmco: unref short failure +[h264 @ 0x556ca08317c0] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +processed_samples 15500 unjoint_samples 15500 joint_samples 46 [1046683, 13914] +processed_samples 15500 unjoint_samples 15500 joint_samples 46 [1046683, 13914] +processed_samples 15500 unjoint_samples 15500 joint_samples 46 
[503911, 1038890] +processed_samples 15500 unjoint_samples 15500 joint_samples 46 [503911, 1038890] +processed_samples 15501 unjoint_samples 15500 joint_samples 48 [1022619, 288991] +processed_samples 15500 unjoint_samples 15500 joint_samples 47 [1047224, 33108] +processed_samples 15500 unjoint_samples 15500 joint_samples 46 [1046395, 196465] +processed_samples 15500 unjoint_samples 15500 joint_samples 47 [1047224, 33108] +processed_samples 15501 unjoint_samples 15500 joint_samples 48 [1022619, 288991] +processed_samples 15500 unjoint_samples 15500 joint_samples 46 [655305, 1046938] +processed_samples 15500 unjoint_samples 15500 joint_samples 46 [1046395, 196465] +processed_samples 15501 unjoint_samples 15500 joint_samples 45 [1046632, 468895] +processed_samples 15500 unjoint_samples 15500 joint_samples 46 [655305, 1046938] +processed_samples 15501 unjoint_samples 15500 joint_samples 45 [1046632, 468895] +processed_samples 15501 unjoint_samples 15500 joint_samples 45 [995947, 996118] +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +processed_samples 15501 unjoint_samples 15500 joint_samples 45 [995947, 996118] +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55ced4bfef40] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x55cecd12f200] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9be345c0] 
mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +processed_samples 15600 unjoint_samples 15600 joint_samples 46 [1046683, 325352] +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +processed_samples 15600 unjoint_samples 15600 joint_samples 47 [1047224, 276406] +[h264 @ 0x55cecf1bf040] mmco: unref short failure +processed_samples 15600 unjoint_samples 15600 joint_samples 46 [1046683, 325352] +processed_samples 15600 unjoint_samples 15600 joint_samples 46 [1046395, 563251] +processed_samples 15600 unjoint_samples 15600 joint_samples 46 [960770, 1046938] +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +processed_samples 15600 unjoint_samples 15600 joint_samples 47 [1047224, 276406] +processed_samples 15601 unjoint_samples 15600 joint_samples 46 [201576, 1028642] +processed_samples 15600 unjoint_samples 15600 joint_samples 46 [1046395, 563251] +processed_samples 15600 unjoint_samples 15600 joint_samples 46 [960770, 1046938] +processed_samples 15601 unjoint_samples 15600 joint_samples 48 [1022619, 564576] +processed_samples 15601 unjoint_samples 15600 joint_samples 45 [1046632, 744001] +processed_samples 15600 unjoint_samples 15600 joint_samples 46 [767713, 1038890] +processed_samples 15601 unjoint_samples 15600 joint_samples 46 [201576, 1028642] +processed_samples 15601 unjoint_samples 15600 joint_samples 48 [1022619, 564576] +processed_samples 15601 unjoint_samples 15600 joint_samples 45 [1046632, 744001] +processed_samples 15600 unjoint_samples 15600 joint_samples 46 [767713, 1038890] +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 
0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +processed_samples 15700 unjoint_samples 15700 joint_samples 47 [34975, 1045124] +processed_samples 15700 unjoint_samples 15700 joint_samples 47 [34975, 1045124] +processed_samples 15701 unjoint_samples 15700 joint_samples 46 [503210, 1028642] +processed_samples 15700 unjoint_samples 15700 joint_samples 47 [1042032, 281142] +processed_samples 15700 unjoint_samples 15700 joint_samples 46 [1046683, 832994] +processed_samples 15700 unjoint_samples 15700 joint_samples 47 [1042032, 281142] +processed_samples 15700 unjoint_samples 15700 joint_samples 46 [1046683, 832994] +processed_samples 15700 unjoint_samples 15700 joint_samples 46 [1046395, 865347] +processed_samples 15701 unjoint_samples 15700 joint_samples 48 [1022619, 921760] +processed_samples 15701 unjoint_samples 15700 joint_samples 46 [1046632, 27055] +processed_samples 15700 unjoint_samples 15700 joint_samples 47 [1047224, 561929] +processed_samples 15700 unjoint_samples 15700 joint_samples 46 [1046395, 865347] +processed_samples 15701 unjoint_samples 15700 joint_samples 46 [1046632, 27055] +processed_samples 15700 unjoint_samples 15700 joint_samples 47 [1047224, 561929] +processed_samples 15701 unjoint_samples 15700 joint_samples 46 [503210, 1028642] +processed_samples 15701 unjoint_samples 15700 joint_samples 48 [1022619, 921760] +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 
0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x55cecbea91c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short 
failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecfd71980] mmco: unref short failure +[h264 @ 0x55cecfd71980] mmco: unref short failure +processed_samples 15800 unjoint_samples 15800 joint_samples 47 [361501, 970494] +processed_samples 15800 unjoint_samples 15800 joint_samples 47 [434694, 1045124] +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +processed_samples 15801 unjoint_samples 15800 joint_samples 49 [1045160, 150338] +[h264 @ 0x556c9b545e40] mmco: unref short failure +processed_samples 15800 unjoint_samples 15800 joint_samples 47 [1046395, 134138] +processed_samples 15800 unjoint_samples 15800 joint_samples 47 [1042032, 566821] +processed_samples 15801 unjoint_samples 15800 joint_samples 46 [1046632, 370373] +processed_samples 15800 unjoint_samples 15800 joint_samples 47 [1047224, 831080] +processed_samples 15801 unjoint_samples 15800 joint_samples 46 [762620, 1028642] +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +processed_samples 15800 unjoint_samples 15800 joint_samples 47 [361501, 970494] +processed_samples 15800 unjoint_samples 15800 joint_samples 47 [434694, 1045124] +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +processed_samples 15801 unjoint_samples 15800 joint_samples 49 [1045160, 150338] +processed_samples 15800 unjoint_samples 15800 joint_samples 47 [1046395, 134138] +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +processed_samples 15800 unjoint_samples 15800 joint_samples 47 [1047224, 831080] +processed_samples 15801 unjoint_samples 15800 joint_samples 46 [1046632, 370373] +processed_samples 15800 unjoint_samples 15800 joint_samples 47 [1042032, 566821] +processed_samples 15801 unjoint_samples 15800 joint_samples 46 [762620, 1028642] +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short 
failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc896080] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecd25acc0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef31d00] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9ce53980] 
mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +processed_samples 15900 unjoint_samples 15900 joint_samples 47 [722959, 1045124] +processed_samples 15900 unjoint_samples 15900 joint_samples 48 [135757, 1046963] +processed_samples 15900 unjoint_samples 15900 joint_samples 47 [1046395, 572512] +processed_samples 15900 unjoint_samples 15900 joint_samples 47 [630169, 970494] +processed_samples 15900 unjoint_samples 15900 joint_samples 48 [135757, 1046963] +processed_samples 15900 unjoint_samples 15900 joint_samples 47 [1046395, 572512] +processed_samples 15901 unjoint_samples 15900 joint_samples 47 [959449, 321917] +processed_samples 15900 unjoint_samples 15900 joint_samples 47 [722959, 1045124] +processed_samples 15900 unjoint_samples 15900 joint_samples 47 [1042032, 864719] +processed_samples 15901 unjoint_samples 15900 joint_samples 49 [1045160, 466383] +processed_samples 15900 unjoint_samples 15900 joint_samples 47 [630169, 970494] +processed_samples 15901 unjoint_samples 15900 joint_samples 47 [959449, 321917] +processed_samples 15900 unjoint_samples 15900 joint_samples 47 [1042032, 864719] +processed_samples 15901 unjoint_samples 15900 joint_samples 49 [1045160, 466383] +processed_samples 15901 unjoint_samples 15900 joint_samples 46 [1046632, 656549] +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +processed_samples 15901 unjoint_samples 15900 joint_samples 46 [1046632, 656549] +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc696840] mmco: unref short failure +[h264 @ 0x55cecc696840] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] 
mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9b806400] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9c773b80] mmco: unref short failure +[h264 @ 0x556c9c773b80] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x556c9c773b80] mmco: unref short failure +[h264 @ 0x556c9c773b80] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x55ced00fafc0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +processed_samples 16000 unjoint_samples 16000 joint_samples 48 [1047198, 202954] +[h264 @ 0x55cecf760f80] mmco: unref short failure +processed_samples 16000 unjoint_samples 16000 joint_samples 48 [1044328, 105717] +[h264 @ 0x556c9a9a5580] mmco: unref short failure +processed_samples 16000 unjoint_samples 16000 joint_samples 48 [518045, 1046963] +processed_samples 16001 unjoint_samples 16000 joint_samples 46 [1046632, 935896] +processed_samples 16001 unjoint_samples 16000 joint_samples 47 [959449, 641205] +processed_samples 16000 unjoint_samples 16000 joint_samples 47 [1046395, 942303] +processed_samples 16000 unjoint_samples 16000 joint_samples 47 [934607, 970494] +processed_samples 16001 unjoint_samples 16000 joint_samples 49 [1045160, 722553] +processed_samples 16000 unjoint_samples 16000 joint_samples 48 [1047198, 202954] +processed_samples 16000 unjoint_samples 16000 joint_samples 48 [1044328, 105717] +processed_samples 16000 unjoint_samples 16000 joint_samples 48 [518045, 1046963] +processed_samples 16000 unjoint_samples 16000 joint_samples 47 [1046395, 942303] +[h264 @ 0x55cecf3fb740] mmco: unref short 
failure +processed_samples 16001 unjoint_samples 16000 joint_samples 47 [959449, 641205] +processed_samples 16000 unjoint_samples 16000 joint_samples 47 [934607, 970494] +processed_samples 16001 unjoint_samples 16000 joint_samples 46 [1046632, 935896] +processed_samples 16001 unjoint_samples 16000 joint_samples 49 [1045160, 722553] +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 
@ 0x556c9b804fc0] mmco: unref short failure +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1045756, 166097] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1047198, 508242] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1045756, 166097] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1046395, 224855] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1047198, 508242] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1046395, 224855] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1044328, 609983] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1044328, 609983] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [822602, 1046963] +processed_samples 16100 unjoint_samples 16100 joint_samples 48 [822602, 1046963] +processed_samples 16101 unjoint_samples 16100 joint_samples 47 [1046632, 184962] +processed_samples 16101 unjoint_samples 16100 joint_samples 47 [1046632, 184962] +processed_samples 16101 unjoint_samples 16100 joint_samples 47 [959449, 909705] +processed_samples 16101 unjoint_samples 16100 joint_samples 49 [1045160, 1016219] +processed_samples 16101 unjoint_samples 16100 joint_samples 47 [959449, 909705] +processed_samples 16101 unjoint_samples 16100 joint_samples 49 [1045160, 1016219] +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc896080] mmco: unref short failure +[h264 @ 0x55cecc896080] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x55cecd77d700] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure 
+[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x556c9f3e3200] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55ced049ab40] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +processed_samples 16200 unjoint_samples 16200 joint_samples 49 [1046592, 85300] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1047198, 919413] +processed_samples 16200 unjoint_samples 16200 joint_samples 49 [1046592, 85300] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1047198, 919413] +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x55cecca4b9c0] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1046395, 574785] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1046395, 574785] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1045756, 381960] +processed_samples 16201 unjoint_samples 16200 joint_samples 48 [1045192, 38316] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1045756, 381960] +processed_samples 16201 unjoint_samples 16200 joint_samples 48 [1045192, 38316] +processed_samples 16201 unjoint_samples 16200 joint_samples 50 [1045160, 288442] +processed_samples 16201 unjoint_samples 16200 
joint_samples 50 [1045160, 288442]
+processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1044328, 906182]
+processed_samples 16201 unjoint_samples 16200 joint_samples 47 [1046632, 488158]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
+processed_samples 16300 unjoint_samples 16300 joint_samples 49 [1047198, 178017]
+processed_samples 16301 unjoint_samples 16300 joint_samples 48 [1045192, 407863]
+processed_samples 16300 unjoint_samples 16300 joint_samples 49 [1046703, 189106]
+processed_samples 16300 unjoint_samples 16300 joint_samples 48 [1045756, 696515]
+processed_samples 16300 unjoint_samples 16300 joint_samples 49 [1046592, 399295]
+processed_samples 16301 unjoint_samples 16300 joint_samples 50 [1045160, 660040]
+processed_samples 16300 unjoint_samples 16300 joint_samples 48 [1046395, 866206]
+processed_samples 16301 unjoint_samples 16300 joint_samples 47 [1046632, 874554]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
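Progress counters like the processed_samples / unjoint_samples / joint_samples lines above are typically emitted from the data pipeline on every data-parallel rank, so a shared log collects one copy per rank. A minimal sketch of the usual rank-0 guard, assuming torch.distributed is initialized for this run and the helper name log_progress is hypothetical:

    import torch.distributed as dist

    def log_progress(message: str) -> None:
        # Emit pipeline progress only from rank 0 so the shared log file
        # does not collect one copy of every counter line per rank.
        if not dist.is_available() or not dist.is_initialized() or dist.get_rank() == 0:
            print(message, flush=True)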
+processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1047198, 425126]
+processed_samples 16400 unjoint_samples 16400 joint_samples 49 [126263, 1047016]
+processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1046703, 529198]
+processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1046592, 660421]
+processed_samples 16400 unjoint_samples 16400 joint_samples 48 [1045756, 982042]
+processed_samples 16401 unjoint_samples 16400 joint_samples 48 [1045192, 729330]
+processed_samples 16401 unjoint_samples 16400 joint_samples 48 [324010, 1001356]
+processed_samples 16401 unjoint_samples 16400 joint_samples 50 [1045160, 1009089]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
+processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1045756, 205423]
+processed_samples 16500 unjoint_samples 16500 joint_samples 50 [1047044, 100693]
+processed_samples 16500 unjoint_samples 16500 joint_samples 49 [429779, 1047016]
+processed_samples 16501 unjoint_samples 16500 joint_samples 49 [164379, 1027568]
+processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1046703, 809542]
+processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1047198, 782483]
+processed_samples 16501 unjoint_samples 16500 joint_samples 51 [338950, 1044674]
+processed_samples 16501 unjoint_samples 16500 joint_samples 48 [613584, 1001356]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
+processed_samples 16600 unjoint_samples 16600 joint_samples 50 [246980, 1007086]
+processed_samples 16600 unjoint_samples 16600 joint_samples 49 [1045756, 490556]
+processed_samples 16600 unjoint_samples 16600 joint_samples 50 [93002, 1024100]
+processed_samples 16600 unjoint_samples 16600 joint_samples 50 [1047044, 357407]
+processed_samples 16600 unjoint_samples 16600 joint_samples 49 [714779, 1047016]
+processed_samples 16601 unjoint_samples 16600 joint_samples 49 [389427, 1027568]
+processed_samples 16601 unjoint_samples 16600 joint_samples 51 [626220, 1044674]
+processed_samples 16601 unjoint_samples 16600 joint_samples 48 [1003763, 1001356]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
+processed_samples 16700 unjoint_samples 16700 joint_samples 50 [382497, 1024100]
+processed_samples 16700 unjoint_samples 16700 joint_samples 50 [487735, 1007086]
+processed_samples 16701 unjoint_samples 16700 joint_samples 52 [55409, 1046935]
+processed_samples 16700 unjoint_samples 16700 joint_samples 50 [1047044, 671876]
+processed_samples 16701 unjoint_samples 16700 joint_samples 49 [699797, 1027568]
+processed_samples 16701 unjoint_samples 16700 joint_samples 49 [272280, 1045045]
+processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1045756, 867924]
+processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1012672, 1047016]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
+FileNotFoundError datasets/LMM/lmms-lab/LLaVA-Video-178K/liwei_youtube_videos/videos/youtube_video_2024/ytb_y6ReUXtm_VE.mp4
+processed_samples 16800 unjoint_samples 16800 joint_samples 50 [266894, 1046938]
+processed_samples 16800 unjoint_samples 16800 joint_samples 50 [713252, 1024100]
+processed_samples 16800 unjoint_samples 16800 joint_samples 50 [229932, 1047016]
+processed_samples 16800 unjoint_samples 16800 joint_samples 50 [794183, 1007086]
+processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1047044, 992989]
+processed_samples 16802 unjoint_samples 16800 joint_samples 52 [347380, 1046935]
+processed_samples 16801 unjoint_samples 16800 joint_samples 49 [566930, 1045045]
+processed_samples 16801 unjoint_samples 16800 joint_samples 49 [1027118, 1027568]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
+processed_samples 16900 unjoint_samples 16900 joint_samples 50 [688184, 1046938]
+processed_samples 16900 unjoint_samples 16900 joint_samples 51 [241751, 1040689]
+processed_samples 16900 unjoint_samples 16900 joint_samples 51 [388534, 1042771]
+processed_samples 16901 unjoint_samples 16900 joint_samples 50 [270961, 1046159]
+processed_samples 16900 unjoint_samples 16900 joint_samples 50 [569833, 1047016]
+processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1020824, 1024100]
+processed_samples 16901 unjoint_samples 16900 joint_samples 49 [835915, 1045045]
+processed_samples 16902 unjoint_samples 16900 joint_samples 52 [626221, 1046935]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
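The FileNotFoundError line above reports a missing clip (ytb_y6ReUXtm_VE.mp4) without a traceback, which suggests the video loader catches the exception, logs the path, and skips the sample rather than aborting the run. A minimal sketch of that pattern, with the decoder passed in as a callable because the actual loader used here is not shown in this log:

    from typing import Any, Callable, Optional

    def safe_load_video(path: str, decode: Callable[[str], Any]) -> Optional[Any]:
        # decode stands in for whatever video decoder the pipeline uses;
        # it is a parameter here only to keep the sketch self-contained.
        try:
            return decode(path)
        except FileNotFoundError:
            # Mirror the "FileNotFoundError <path>" lines seen in this log and
            # return None so the caller can drop the sample and continue.
            print(f"FileNotFoundError {path}", flush=True)
            return None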
+processed_samples 17000 unjoint_samples 17000 joint_samples 50 [852588, 1047016]
+processed_samples 17000 unjoint_samples 17000 joint_samples 51 [757454, 1042771]
+processed_samples 17000 unjoint_samples 17000 joint_samples 51 [1023236, 352536]
+processed_samples 17000 unjoint_samples 17000 joint_samples 51 [507190, 1040689]
+processed_samples 17000 unjoint_samples 17000 joint_samples 50 [952633, 1046938]
+processed_samples 17001 unjoint_samples 17000 joint_samples 50 [1047199, 163070]
+processed_samples 17001 unjoint_samples 17000 joint_samples 50 [531617, 1046159]
+processed_samples 17002 unjoint_samples 17000 joint_samples 52 [987645, 1046935]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1045052, 277085]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [100674, 1047820]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1023236, 802762]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [943297, 1040689]
+processed_samples 17102 unjoint_samples 17100 joint_samples 53 [224216, 1047435]
+processed_samples 17101 unjoint_samples 17100 joint_samples 50 [750628, 1046159]
+processed_samples 17101 unjoint_samples 17100 joint_samples 50 [1047199, 502619]
+processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1043333, 1045109]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
+processed_samples 17200 unjoint_samples 17200 joint_samples 52 [1040485, 62733]
+processed_samples 17200 unjoint_samples 17200 joint_samples 52 [1047145, 221754]
+processed_samples 17200 unjoint_samples 17200 joint_samples 52 [278244, 1046180]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [1045052, 554953]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [498473, 1047820]
+processed_samples 17202 unjoint_samples 17200 joint_samples 53 [514772, 1047435]
+processed_samples 17201 unjoint_samples 17200 joint_samples 50 [1017160, 1046159]
+processed_samples 17201 unjoint_samples 17200 joint_samples 50 [1047199, 817985]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
+processed_samples 17300 unjoint_samples 17300 joint_samples 52 [1040485, 339214]
+processed_samples 17301 unjoint_samples 17300 joint_samples 51 [461088, 1046613]
+processed_samples 17300 unjoint_samples 17300 joint_samples 52 [1047145, 664409]
+processed_samples 17300 unjoint_samples 17300 joint_samples 52 [663645, 1046180]
+processed_samples 17301 unjoint_samples 17300 joint_samples 51 [1047199, 102550]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [805108, 1047820]
+processed_samples 17300 unjoint_samples 17300 joint_samples 52 [134544, 917579]
+processed_samples 17302 unjoint_samples 17300 joint_samples 53 [893795, 1047435]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
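The recurring [h264 @ 0x...] "mmco: unref short failure" messages are libavcodec (FFmpeg) H.264 decoder warnings: a memory management control operation asked the decoder to unreference a short-term reference picture it no longer holds, which usually points to truncated or imperfect source streams and is generally harmless to the decoded frames. If these clips are decoded through PyAV (an assumption; this log does not show which decoder is used), the warnings can be kept out of the training log by raising FFmpeg's log level:

    import av
    import av.logging

    # Report only FFmpeg errors; per-frame decoder warnings such as
    # "mmco: unref short failure" are suppressed below this level.
    av.logging.set_level(av.logging.ERROR)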
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1045386, 71695]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [654767, 917579]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1040485, 760331]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1000459, 1046180]
+processed_samples 17402 unjoint_samples 17400 joint_samples 54 [247864, 1047435]
+processed_samples 17401 unjoint_samples 17400 joint_samples 51 [1047199, 432866]
+processed_samples 17401 unjoint_samples 17400 joint_samples 51 [843114, 1046613]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1047145, 984825]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
+processed_samples 17500 unjoint_samples 17500 joint_samples 53 [20789, 1045341]
+processed_samples 17500 unjoint_samples 17500 joint_samples 53 [200637, 1046639]
+processed_samples 17500 unjoint_samples 17500 joint_samples 53 [407747, 1018490]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [1045386, 384147]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [880662, 917579]
+processed_samples 17501 unjoint_samples 17500 joint_samples 52 [1043762, 54719]
+processed_samples 17501 unjoint_samples 17500 joint_samples 51 [1047199, 692072]
+processed_samples 17502 unjoint_samples 17500 joint_samples 54 [570998, 1047435]
+[h264 decoder warnings: repeated "mmco: unref short failure" messages across multiple decoder contexts]
+[h264 @ 
0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9b623080] mmco: unref short failure +[h264 @ 0x556c9b623080] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x556ca011b780] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x556c9f547d00] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x556c9b7d8c80] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9ff46a80] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short 
failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +processed_samples 17600 unjoint_samples 17600 joint_samples 53 [66898, 1028062] +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +processed_samples 17600 unjoint_samples 17600 joint_samples 53 [512326, 1046639] +processed_samples 17600 unjoint_samples 17600 joint_samples 53 [408471, 1045341] +processed_samples 17600 unjoint_samples 17600 joint_samples 53 [66898, 1028062] +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +processed_samples 17600 unjoint_samples 17600 joint_samples 53 [512326, 1046639] +processed_samples 17600 unjoint_samples 17600 joint_samples 53 [408471, 1045341] +processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1045386, 698066] +processed_samples 17600 unjoint_samples 17600 joint_samples 53 [828572, 1018490] +processed_samples 17601 unjoint_samples 17600 joint_samples 52 [1043762, 341333] +processed_samples 17600 unjoint_samples 17600 joint_samples 53 [828572, 1018490] +processed_samples 17601 unjoint_samples 17600 joint_samples 51 [1047199, 981700] +processed_samples 17601 unjoint_samples 17600 joint_samples 52 [1043762, 341333] +processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1045386, 698066] +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +processed_samples 17601 unjoint_samples 17600 joint_samples 51 [1047199, 981700] +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +processed_samples 17602 unjoint_samples 17600 joint_samples 54 [871304, 1047435] +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +processed_samples 17602 unjoint_samples 17600 joint_samples 54 [871304, 1047435] +[h264 @ 0x55cecda179c0] mmco: unref short 
failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x556c9b547500] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9ab74d00] mmco: unref short failure +[h264 @ 0x556c9ab74d00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecfd7f240] 
mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +processed_samples 17700 unjoint_samples 17700 joint_samples 53 [349866, 1028062] +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +processed_samples 17700 unjoint_samples 17700 joint_samples 53 [1002996, 1046639] +processed_samples 17701 unjoint_samples 17700 joint_samples 52 [1043762, 635230] +processed_samples 17700 unjoint_samples 17700 joint_samples 53 [349866, 1028062] +processed_samples 17700 unjoint_samples 17700 joint_samples 53 [641771, 1045341] +processed_samples 17700 unjoint_samples 17700 joint_samples 52 [1045386, 954008] +processed_samples 17700 unjoint_samples 17700 joint_samples 53 [641771, 1045341] +processed_samples 17701 unjoint_samples 17700 joint_samples 52 [1043762, 635230] +processed_samples 17700 unjoint_samples 17700 joint_samples 53 [1002996, 1046639] +processed_samples 17700 unjoint_samples 17700 joint_samples 52 [1045386, 954008] +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +processed_samples 17702 unjoint_samples 17700 joint_samples 55 [1046965, 52323] +processed_samples 17702 unjoint_samples 17700 joint_samples 55 [1046965, 52323] +processed_samples 17701 unjoint_samples 17700 joint_samples 52 [407548, 1046457] +processed_samples 17701 unjoint_samples 17700 joint_samples 52 [407548, 1046457] +processed_samples 17700 unjoint_samples 17700 joint_samples 53 [1046448, 1045944] +processed_samples 17700 unjoint_samples 17700 joint_samples 53 [1046448, 1045944] +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9eb85640] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x556c9e8aeb80] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x55cecca4b500] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecd8b7f40] mmco: unref short failure +[h264 @ 0x55cecd8b7f40] mmco: unref short failure +[h264 @ 0x55cecd8b7f40] mmco: unref short failure +[h264 @ 0x55cecd8b7f40] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 
0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9f02e200] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55ceccc93c40] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9e9ed640] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecfcef9c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +processed_samples 17800 unjoint_samples 17800 joint_samples 53 [686552, 1028062] +processed_samples 17800 unjoint_samples 17800 joint_samples 53 [686552, 1028062] +processed_samples 17800 unjoint_samples 17800 joint_samples 54 [388987, 1045944] +processed_samples 17800 unjoint_samples 17800 joint_samples 54 [1028043, 241335] +processed_samples 17800 unjoint_samples 17800 joint_samples 54 [388987, 1045944] +processed_samples 17800 unjoint_samples 17800 joint_samples 53 [250342, 1043114] 
+processed_samples 17800 unjoint_samples 17800 joint_samples 54 [1028043, 241335] +processed_samples 17800 unjoint_samples 17800 joint_samples 53 [918578, 1045341] +processed_samples 17800 unjoint_samples 17800 joint_samples 53 [918578, 1045341] +processed_samples 17800 unjoint_samples 17800 joint_samples 53 [250342, 1043114] +processed_samples 17802 unjoint_samples 17800 joint_samples 55 [1046965, 415851] +processed_samples 17801 unjoint_samples 17800 joint_samples 52 [696381, 1046457] +processed_samples 17802 unjoint_samples 17800 joint_samples 55 [1046965, 415851] +processed_samples 17801 unjoint_samples 17800 joint_samples 52 [1043762, 903879] +processed_samples 17801 unjoint_samples 17800 joint_samples 52 [696381, 1046457] +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +processed_samples 17801 unjoint_samples 17800 joint_samples 52 [1043762, 903879] +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x556c9edf1dc0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9e7e4a00] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: 
unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x55cecbe27840] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x55ced4c03600] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecbca0840] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9e991800] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +processed_samples 17900 unjoint_samples 17900 joint_samples 54 [1047340, 175301] +processed_samples 17900 unjoint_samples 17900 joint_samples 54 [614329, 1045944] +processed_samples 17900 unjoint_samples 17900 joint_samples 53 [565634, 1043114] +processed_samples 17900 unjoint_samples 17900 joint_samples 54 [1028043, 604207] +processed_samples 17901 unjoint_samples 17900 joint_samples 53 [118043, 1046831] +processed_samples 17900 unjoint_samples 17900 joint_samples 53 [1003559, 1028062] +processed_samples 17902 unjoint_samples 17900 joint_samples 55 [1046965, 764348] +processed_samples 17900 unjoint_samples 17900 joint_samples 54 [1047340, 175301] +processed_samples 17901 unjoint_samples 17900 joint_samples 53 [118043, 1046831] +processed_samples 17900 unjoint_samples 17900 joint_samples 53 [565634, 1043114] +processed_samples 17900 unjoint_samples 17900 joint_samples 54 [1028043, 604207] +processed_samples 17900 unjoint_samples 17900 joint_samples 54 [614329, 1045944] +processed_samples 17900 unjoint_samples 17900 joint_samples 53 [1003559, 1028062] +processed_samples 17901 unjoint_samples 17900 joint_samples 53 [2476, 1046457] +processed_samples 17902 unjoint_samples 17900 joint_samples 55 [1046965, 764348] +processed_samples 17901 unjoint_samples 17900 joint_samples 53 [2476, 1046457] +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: 
unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9f3d7900] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x55ceccedbd40] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9b6ce740] mmco: unref short failure +[h264 @ 0x556c9b6ce740] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x556c9e991800] mmco: unref short failure +[h264 @ 0x556c9e991800] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9d570440] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 
0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x55cecfbffbc0] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +processed_samples 18000 unjoint_samples 18000 joint_samples 54 [333222, 1044080] +processed_samples 18000 unjoint_samples 18000 joint_samples 54 [333222, 1044080] +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +processed_samples 18000 unjoint_samples 18000 joint_samples 54 [1047340, 490423] +processed_samples 18000 unjoint_samples 18000 joint_samples 54 [1047340, 490423] +processed_samples 18000 unjoint_samples 18000 joint_samples 53 [829284, 1043114] +processed_samples 18000 unjoint_samples 18000 joint_samples 53 [829284, 1043114] +processed_samples 18000 unjoint_samples 18000 joint_samples 54 [931403, 1045944] +processed_samples 18000 unjoint_samples 18000 joint_samples 54 [931403, 1045944] +processed_samples 18002 unjoint_samples 18000 joint_samples 56 [134511, 1046879] +processed_samples 18001 unjoint_samples 18000 joint_samples 53 [455585, 1046457] +processed_samples 18002 unjoint_samples 18000 joint_samples 56 [134511, 1046879] +processed_samples 18001 unjoint_samples 18000 joint_samples 53 [341682, 1046831] +processed_samples 18001 unjoint_samples 18000 joint_samples 53 [455585, 1046457] +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +processed_samples 18000 unjoint_samples 18000 joint_samples 54 [1028043, 888545] +[h264 @ 0x556c9edb9c00] mmco: unref short failure +processed_samples 18001 unjoint_samples 18000 joint_samples 53 [341682, 1046831] +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +processed_samples 18000 unjoint_samples 18000 joint_samples 54 [1028043, 888545] +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x55cecfa7bc80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 
@ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecf760f80] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x55cecf3fb740] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecfd7f240] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556c9b804fc0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x556c9b61bec0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x55cecd10b5c0] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x556ca0b64640] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short 
failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +processed_samples 18100 unjoint_samples 18100 joint_samples 54 [622255, 1044080] +processed_samples 18100 unjoint_samples 18100 joint_samples 55 [138070, 1046768] +processed_samples 18100 unjoint_samples 18100 joint_samples 54 [37690, 1045313] +processed_samples 18100 unjoint_samples 18100 joint_samples 54 [622255, 1044080] +processed_samples 18100 unjoint_samples 18100 joint_samples 55 [1046287, 145492] +processed_samples 18100 unjoint_samples 18100 joint_samples 54 [1047340, 814467] +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +processed_samples 18100 unjoint_samples 18100 joint_samples 54 [37690, 1045313] +processed_samples 18100 unjoint_samples 18100 joint_samples 55 [138070, 1046768] +processed_samples 18101 unjoint_samples 18100 joint_samples 53 [632539, 1046831] +processed_samples 18100 unjoint_samples 18100 joint_samples 55 [1046287, 145492] +processed_samples 18102 unjoint_samples 18100 joint_samples 56 [403553, 1046879] +processed_samples 18101 unjoint_samples 18100 joint_samples 53 [632539, 1046831] +processed_samples 18101 unjoint_samples 18100 joint_samples 53 [867927, 1046457] +processed_samples 18101 unjoint_samples 18100 joint_samples 53 [867927, 1046457] +processed_samples 18100 unjoint_samples 18100 joint_samples 54 [1047340, 814467] +processed_samples 18102 unjoint_samples 18100 joint_samples 56 [403553, 1046879] +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecbe48180] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x55ced36ddd80] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x556c9db761c0] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x556ca44ada00] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x556c9c6ed400] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecc463b00] mmco: unref short 
failure +[h264 @ 0x55cecc463b00] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecd0bbcc0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +[h264 @ 0x55cecf96a400] mmco: unref short failure +[h264 @ 0x55cecfbb1040] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x55cecc62f840] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecd5cdb00] mmco: unref short failure +[h264 @ 0x556c9fa49a00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55ced1ffa340] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +processed_samples 18200 unjoint_samples 18200 joint_samples 55 [97623, 1009878] +processed_samples 18200 unjoint_samples 18200 joint_samples 55 [97623, 1009878] +[h264 @ 0x55cecfd7f240] mmco: unref short failure +processed_samples 18200 unjoint_samples 18200 joint_samples 54 [341614, 1045313] +processed_samples 18200 unjoint_samples 18200 joint_samples 55 [1046287, 489000] +processed_samples 18200 unjoint_samples 18200 joint_samples 55 [401847, 1046768] +processed_samples 18201 unjoint_samples 18200 joint_samples 54 [114770, 1046978] +processed_samples 18201 unjoint_samples 18200 joint_samples 54 [114770, 1046978] +processed_samples 18200 unjoint_samples 18200 joint_samples 54 [341614, 1045313] +processed_samples 18200 unjoint_samples 18200 joint_samples 55 [401847, 1046768] +processed_samples 18200 unjoint_samples 18200 joint_samples 55 [1046287, 489000] +processed_samples 18201 unjoint_samples 18200 joint_samples 53 [898785, 1046831] +processed_samples 18200 unjoint_samples 18200 joint_samples 54 [1047836, 1044080] +processed_samples 18200 unjoint_samples 18200 joint_samples 54 [1047836, 1044080] +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: 
unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +[h264 @ 0x55ced0bab780] mmco: unref short failure +processed_samples 18201 unjoint_samples 18200 joint_samples 53 [898785, 1046831] +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +processed_samples 18202 unjoint_samples 18200 joint_samples 56 [859410, 1046879] +processed_samples 18202 unjoint_samples 18200 joint_samples 56 [859410, 1046879] +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x556c9bbe68c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9f604dc0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9f8d0e00] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecd2a7240] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cece2562c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x556c9dac5cc0] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecda4eb80] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x55ceccca7dc0] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x55cecc7f9480] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556c9a9a5580] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9bdc9480] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55ced2c33900] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] 
mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x556c9b93f280] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x55cecc99a1c0] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x556c9cfa15c0] mmco: unref short failure +[h264 @ 0x55ceccfcdfc0] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [421099, 1009878] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [421099, 1009878] +processed_samples 18301 unjoint_samples 18300 joint_samples 54 [1046776, 106618] +processed_samples 18300 unjoint_samples 18300 joint_samples 54 [590097, 1045313] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [1047836, 275163] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [1047836, 275163] +processed_samples 18301 unjoint_samples 18300 joint_samples 54 [1046776, 106618] +processed_samples 18300 unjoint_samples 18300 joint_samples 54 [590097, 1045313] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [676708, 1046768] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [1046287, 792613] +processed_samples 18302 unjoint_samples 18300 joint_samples 57 [75191, 1046879] +processed_samples 18301 unjoint_samples 18300 joint_samples 54 [383435, 1046978] +processed_samples 18302 unjoint_samples 18300 joint_samples 57 [75191, 1046879] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [676708, 1046768] +processed_samples 18301 unjoint_samples 18300 joint_samples 54 [383435, 1046978] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [1046287, 792613] +[h264 @ 0x556c9f1ff900] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x55cecd1dc600] mmco: unref short failure +[h264 @ 0x556c9e009040] mmco: unref short failure +[h264 @ 0x55cecc4988c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecf9ebbc0] mmco: unref short failure +[h264 @ 0x55cecda4eb80] 
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 18400 unjoint_samples 18400 joint_samples 56 [84896, 1044558]
+processed_samples 18400 unjoint_samples 18400 joint_samples 55 [723429, 1009878]
+processed_samples 18401 unjoint_samples 18400 joint_samples 54 [637016, 1046978]
+processed_samples 18400 unjoint_samples 18400 joint_samples 55 [1031345, 1046768]
+processed_samples 18401 unjoint_samples 18400 joint_samples 54 [1046776, 329545]
+processed_samples 18400 unjoint_samples 18400 joint_samples 55 [1047836, 622522]
+processed_samples 18400 unjoint_samples 18400 joint_samples 54 [1016012, 1045313]
+processed_samples 18402 unjoint_samples 18400 joint_samples 57 [433930, 1046879]
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 18500 unjoint_samples 18500 joint_samples 55 [1047836, 944466]
+processed_samples 18500 unjoint_samples 18500 joint_samples 55 [292345, 1047961]
+processed_samples 18500 unjoint_samples 18500 joint_samples 56 [366935, 1044558]
+processed_samples 18500 unjoint_samples 18500 joint_samples 56 [1046598, 280109]
+processed_samples 18500 unjoint_samples 18500 joint_samples 55 [1041043, 1033593]
+processed_samples 18501 unjoint_samples 18500 joint_samples 54 [931283, 1046978]
+processed_samples 18501 unjoint_samples 18500 joint_samples 54 [1046776, 659654]
+processed_samples 18502 unjoint_samples 18500 joint_samples 57 [782307, 1046879]
+[h264 @ 0x55cecf1bf040] illegal short term buffer state detected
+[h264 @ 0x556c9b547500] illegal short term buffer state detected
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 18600 unjoint_samples 18600 joint_samples 56 [255883, 1038383]
+processed_samples 18600 unjoint_samples 18600 joint_samples 56 [365775, 1037994]
+processed_samples 18600 unjoint_samples 18600 joint_samples 56 [1046598, 635259]
+processed_samples 18600 unjoint_samples 18600 joint_samples 55 [595550, 1047961]
+processed_samples 18601 unjoint_samples 18600 joint_samples 55 [1039272, 176257]
+processed_samples 18600 unjoint_samples 18600 joint_samples 56 [636742, 1044558]
+processed_samples 18601 unjoint_samples 18600 joint_samples 54 [1046776, 928456]
+processed_samples 18602 unjoint_samples 18600 joint_samples 57 [1045676, 1046879]
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+[mov,mp4,m4a,3gp,3g2,mj2 @ 0x55ceced9b3c0] stream 1, offset 0x14000d8: partial file
+[mov,mp4,m4a,3gp,3g2,mj2 @ 0x556c9f82e880] stream 1, offset 0x14000d8: partial file
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 18700 unjoint_samples 18700 joint_samples 57 [1046598, 9250]
+processed_samples 18700 unjoint_samples 18700 joint_samples 56 [1027806, 54115]
+processed_samples 18700 unjoint_samples 18700 joint_samples 56 [743654, 1037994]
+processed_samples 18700 unjoint_samples 18700 joint_samples 56 [641251, 1038383]
+processed_samples 18702 unjoint_samples 18700 joint_samples 58 [382798, 1046879]
+processed_samples 18702 unjoint_samples 18700 joint_samples 55 [243129, 1045773]
+processed_samples 18700 unjoint_samples 18700 joint_samples 56 [1044919, 1046810]
+processed_samples 18701 unjoint_samples 18700 joint_samples 55 [1039272, 455213]
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 18800 unjoint_samples 18800 joint_samples 57 [1013757, 82478]
+processed_samples 18800 unjoint_samples 18800 joint_samples 57 [250559, 1046810]
+processed_samples 18800 unjoint_samples 18800 joint_samples 57 [1046598, 274893]
+processed_samples 18800 unjoint_samples 18800 joint_samples 56 [1027806, 378161]
+processed_samples 18801 unjoint_samples 18800 joint_samples 55 [1039272, 814317]
+processed_samples 18800 unjoint_samples 18800 joint_samples 56 [899277, 1038383]
+processed_samples 18802 unjoint_samples 18800 joint_samples 58 [680020, 1046879]
+processed_samples 18802 unjoint_samples 18800 joint_samples 55 [572839, 1045773]
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 18900 unjoint_samples 18900 joint_samples 57 [212147, 1047332]
+processed_samples 18900 unjoint_samples 18900 joint_samples 57 [1013757, 366184]
+processed_samples 18900 unjoint_samples 18900 joint_samples 57 [523043, 1046810]
+processed_samples 18900 unjoint_samples 18900 joint_samples 57 [1046598, 541164]
+processed_samples 18901 unjoint_samples 18900 joint_samples 56 [146796, 977588]
+processed_samples 18900 unjoint_samples 18900 joint_samples 56 [1027806, 628922]
+processed_samples 18902 unjoint_samples 18900 joint_samples 58 [951100, 1046879]
+processed_samples 18902 unjoint_samples 18900 joint_samples 55 [850909, 1045773]
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 19000 unjoint_samples 19000 joint_samples 57 [1013757, 699908]
+processed_samples 19000 unjoint_samples 19000 joint_samples 57 [35803, 1046472]
+processed_samples 19000 unjoint_samples 19000 joint_samples 57 [561105, 1047332]
+processed_samples 19000 unjoint_samples 19000 joint_samples 57 [863896, 1046810]
+processed_samples 19001 unjoint_samples 19000 joint_samples 56 [437015, 977588]
+processed_samples 19002 unjoint_samples 19000 joint_samples 59 [1040487, 372395]
+processed_samples 19002 unjoint_samples 19000 joint_samples 56 [1036955, 209907]
+processed_samples 19000 unjoint_samples 19000 joint_samples 57 [1046598, 948068]
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 19100 unjoint_samples 19100 joint_samples 57 [863469, 1047332]
+processed_samples 19100 unjoint_samples 19100 joint_samples 58 [124577, 1046810]
+processed_samples 19100 unjoint_samples 19100 joint_samples 58 [1047192, 169672]
+processed_samples 19100 unjoint_samples 19100 joint_samples 57 [478337, 1046472]
+processed_samples 19101 unjoint_samples 19100 joint_samples 56 [792019, 977588]
+processed_samples 19100 unjoint_samples 19100 joint_samples 57 [1013757, 960323]
+processed_samples 19102 unjoint_samples 19100 joint_samples 59 [1040487, 664694]
+processed_samples 19102 unjoint_samples 19100 joint_samples 56 [1036955, 475826]
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 19200 unjoint_samples 19200 joint_samples 58 [1044435, 180737]
+processed_samples 19200 unjoint_samples 19200 joint_samples 58 [1042307, 305730]
+processed_samples 19200 unjoint_samples 19200 joint_samples 57 [795868, 1046472]
+processed_samples 19200 unjoint_samples 19200 joint_samples 58 [1047192, 527577]
+processed_samples 19200 unjoint_samples 19200 joint_samples 58 [470556, 1046810]
+processed_samples 19202 unjoint_samples 19200 joint_samples 56 [1036955, 722468]
+processed_samples 19201 unjoint_samples 19200 joint_samples 56 [1006958, 1008499]
+processed_samples 19202 unjoint_samples 19200 joint_samples 59 [1040487, 1020849]
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 19300 unjoint_samples 19300 joint_samples 58 [1044435, 435486]
+processed_samples 19300 unjoint_samples 19300 joint_samples 58 [1042307, 610058]
+processed_samples 19301 unjoint_samples 19300 joint_samples 57 [269294, 1035065]
+processed_samples 19302 unjoint_samples 19300 joint_samples 60 [250758, 1047302]
+processed_samples 19300 unjoint_samples 19300 joint_samples 58 [1047192, 885702]
+processed_samples 19300 unjoint_samples 19300 joint_samples 58 [816119, 1046810]
+processed_samples 19300 unjoint_samples 19300 joint_samples 57 [998473, 1046472]
+processed_samples 19302 unjoint_samples 19300 joint_samples 57 [1044094, 20570]
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 19400 unjoint_samples 19400 joint_samples 59 [95361, 1046551]
+processed_samples 19400 unjoint_samples 19400 joint_samples 59 [1045303, 171690]
+processed_samples 19400 unjoint_samples 19400 joint_samples 58 [421779, 1046472]
+processed_samples 19400 unjoint_samples 19400 joint_samples 58 [1044435, 754536]
+processed_samples 19401 unjoint_samples 19400 joint_samples 57 [698614, 1035065]
+processed_samples 19400 unjoint_samples 19400 joint_samples 58 [1042307, 918888]
+processed_samples 19402 unjoint_samples 19400 joint_samples 57 [1044094, 360264]
+processed_samples 19402 unjoint_samples 19400 joint_samples 60 [577855, 1047302]
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
+processed_samples 19500 unjoint_samples 19500 joint_samples 59 [60710, 1033150]
+processed_samples 19500 unjoint_samples 19500 joint_samples 59 [178644, 1026697]
+processed_samples 19500 unjoint_samples 19500 joint_samples 59 [496240, 1046551]
+processed_samples 19500 unjoint_samples 19500 joint_samples 58 [822337, 1046472]
+processed_samples 19500 unjoint_samples 19500 joint_samples 59 [1045303, 457724]
+processed_samples 19501 unjoint_samples 19500 joint_samples 57 [1021289, 1035065]
+processed_samples 19502 unjoint_samples 19500 joint_samples 57 [1044094, 699118]
+processed_samples 19502 unjoint_samples 19500 joint_samples 60 [842699, 1047302]
+[h264 @ …] mmco: unref short failure   (long run of repeated identical h264 decoder warnings, condensed)
@ 0x556c9b5d4980] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9c09b7c0] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x556c9b545e40] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x55cecda179c0] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9ef3c700] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9f577980] mmco: unref short failure +[h264 @ 0x55cecd2579c0] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9aa36f00] mmco: unref short failure +[h264 @ 0x556c9f1aaf40] mmco: unref short failure +[h264 @ 0x556c9f1aaf40] mmco: unref short failure +[h264 @ 0x556c9f1aaf40] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x55cecf1dd680] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x556ca00b4a00] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecd757f00] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x556c9de59100] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x55cecc338740] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short 
failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [1048293, 63716] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [445723, 1026697] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [483491, 1033150] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [1045303, 711290] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [1048293, 63716] +processed_samples 19602 unjoint_samples 19600 joint_samples 61 [1047297, 189543] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [445723, 1026697] +processed_samples 19602 unjoint_samples 19600 joint_samples 58 [120172, 1037898] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [483491, 1033150] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [804583, 1046551] +processed_samples 19602 unjoint_samples 19600 joint_samples 61 [1047297, 189543] +processed_samples 19602 unjoint_samples 19600 joint_samples 58 [120172, 1037898] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [1045303, 711290] +processed_samples 19601 unjoint_samples 19600 joint_samples 58 [449494, 1046692] +processed_samples 19600 unjoint_samples 19600 joint_samples 59 [804583, 1046551] +processed_samples 19601 unjoint_samples 19600 joint_samples 58 [449494, 1046692] +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x556c9f75cb40] mmco: unref short failure +[h264 @ 0x55cecc6495c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9b790b40] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9bebc3c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x556c9ce53980] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecedf0f80] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x55cecfd10e40] mmco: unref short failure +[h264 @ 0x556c9b623080] mmco: unref short failure +[h264 @ 0x556c9b623080] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x556c9b8eee80] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x55cecfcfd3c0] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x556c9edb9c00] mmco: unref short failure +[h264 @ 0x55cece1c0e40] mmco: unref short failure +[h264 @ 0x556c9f138800] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x55cecca039c0] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x556c9bc34440] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecc62d980] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short 
[... decoder warnings omitted ...]
+processed_samples 19700 unjoint_samples 19700 joint_samples 59 [1048293, 374557]
+processed_samples 19700 unjoint_samples 19700 joint_samples 59 [823996, 1033150]
+processed_samples 19700 unjoint_samples 19700 joint_samples 59 [740467, 1026697]
+processed_samples 19700 unjoint_samples 19700 joint_samples 59 [1045303, 1016537]
+processed_samples 19702 unjoint_samples 19700 joint_samples 58 [431802, 1037898]
+processed_samples 19701 unjoint_samples 19700 joint_samples 58 [737671, 1046692]
+processed_samples 19702 unjoint_samples 19700 joint_samples 61 [1047297, 541021]
+processed_samples 19700 unjoint_samples 19700 joint_samples 59 [1041165, 1046551]
[... decoder warnings omitted ...]
+processed_samples 19800 unjoint_samples 19800 joint_samples 59 [1048293, 668351]
+processed_samples 19800 unjoint_samples 19800 joint_samples 60 [1045373, 218751]
+processed_samples 19800 unjoint_samples 19800 joint_samples 60 [257500, 1046551]
+processed_samples 19800 unjoint_samples 19800 joint_samples 60 [270184, 1047548]
+processed_samples 19800 unjoint_samples 19800 joint_samples 60 [96081, 1035591]
+processed_samples 19802 unjoint_samples 19800 joint_samples 61 [1047297, 924973]
+processed_samples 19801 unjoint_samples 19800 joint_samples 58 [1028968, 1046692]
+processed_samples 19802 unjoint_samples 19800 joint_samples 58 [763695, 1037898]
[... decoder warnings omitted ...]
+processed_samples 19900 unjoint_samples 19900 joint_samples 60 [399743, 1035591]
+processed_samples 19900 unjoint_samples 19900 joint_samples 60 [1045373, 602346]
+processed_samples 19901 unjoint_samples 19900 joint_samples 59 [203474, 1046692]
+processed_samples 19900 unjoint_samples 19900 joint_samples 60 [635301, 1047548]
+processed_samples 19902 unjoint_samples 19900 joint_samples 59 [16796, 1047084]
+processed_samples 19902 unjoint_samples 19900 joint_samples 62 [121152, 1046376]
+processed_samples 19900 unjoint_samples 19900 joint_samples 60 [534500, 1046551]
+processed_samples 19900 unjoint_samples 19900 joint_samples 59 [1048293, 1025517]
[... decoder warnings omitted ...]
+processed_samples 20000 unjoint_samples 20000 joint_samples 60 [907591, 1046551]
+processed_samples 20000 unjoint_samples 20000 joint_samples 60 [885451, 1035591]
+processed_samples 20000 unjoint_samples 20000 joint_samples 60 [313332, 1047347]
+processed_samples 20000 unjoint_samples 20000 joint_samples 60 [908782, 1047548]
+processed_samples 20001 unjoint_samples 20000 joint_samples 59 [490251, 1046692]
+processed_samples 20000 unjoint_samples 20000 joint_samples 60 [1045373, 942286]
+processed_samples 20002 unjoint_samples 20000 joint_samples 59 [384555, 1047084]
+processed_samples 20002 unjoint_samples 20000 joint_samples 62 [435814, 1046376]
[... decoder warnings omitted ...]
+processed_samples 20100 unjoint_samples 20100 joint_samples 61 [990893, 334780]
+processed_samples 20100 unjoint_samples 20100 joint_samples 61 [61182, 1045269]
+processed_samples 20100 unjoint_samples 20100 joint_samples 61 [323653, 1043413]
+processed_samples 20100 unjoint_samples 20100 joint_samples 60 [694293, 1047347]
+processed_samples 20100 unjoint_samples 20100 joint_samples 61 [1040505, 118379]
+processed_samples 20101 unjoint_samples 20100 joint_samples 59 [763902, 1046692]
+processed_samples 20102 unjoint_samples 20100 joint_samples 59 [652769, 1047084]
+processed_samples 20102 unjoint_samples 20100 joint_samples 62 [1034171, 1046376]
[... decoder warnings omitted ...]
+processed_samples 20200 unjoint_samples 20200 joint_samples 61 [316009, 1045269]
+processed_samples 20200 unjoint_samples 20200 joint_samples 61 [611571, 1043413]
+processed_samples 20200 unjoint_samples 20200 joint_samples 61 [1040505, 364591]
+processed_samples 20200 unjoint_samples 20200 joint_samples 61 [990893, 685473]
+processed_samples 20202 unjoint_samples 20200 joint_samples 60 [1024721, 170181]
+processed_samples 20202 unjoint_samples 20200 joint_samples 63 [1040929, 324169]
+processed_samples 20201 unjoint_samples 20200 joint_samples 60 [1032529, 184589]
+processed_samples 20200 unjoint_samples 20200 joint_samples 60 [983765, 1047347]
[... decoder warnings omitted ...]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [1047130, 261875]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [913342, 1043413]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [1040505, 706454]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [618114, 1045269]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [990893, 989296]
+processed_samples 20302 unjoint_samples 20300 joint_samples 60 [1024721, 480985]
+processed_samples 20301 unjoint_samples 20300 joint_samples 60 [1032529, 558154]
+processed_samples 20302 unjoint_samples 20300 joint_samples 63 [1040929, 568951]
mmco: unref short failure +[h264 @ 0x556c9be4ff40] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x55cecf993980] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecce63a00] mmco: unref short failure +[h264 @ 0x556c9d990ec0] mmco: unref short failure +[h264 @ 0x556c9ef53d00] mmco: unref short failure +[h264 @ 0x55cecdedcd40] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x556c9d6704c0] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecbd76240] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x55cecc3fe200] mmco: unref short failure +[h264 @ 0x556c9b373f40] mmco: unref short failure +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x55cecf1bf040] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x556c9be345c0] mmco: unref short failure +[h264 @ 0x55cecf9986c0] mmco: unref short failure +[h264 @ 0x55cecc475dc0] mmco: unref short failure +[h264 @ 0x556c9c34cd00] mmco: unref short failure +[h264 @ 0x556c9cb9bb40] mmco: unref short failure +[h264 @ 0x55cecfec0780] mmco: unref short failure +[h264 @ 0x556c9c8d4440] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x556c9bba9b80] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x55cecc6f5980] mmco: unref short failure +[h264 @ 0x556c9b5cb880] mmco: unref short failure +[h264 @ 0x55cecc725b00] mmco: unref short failure +processed_samples 20400 unjoint_samples 20400 joint_samples 62 [137717, 1046734] +processed_samples 20400 unjoint_samples 20400 joint_samples 62 [137717, 1046734] +processed_samples 20400 unjoint_samples 20400 joint_samples 62 [1045036, 63782] +processed_samples 20400 unjoint_samples 20400 joint_samples 62 [1044092, 188834] +processed_samples 20400 unjoint_samples 20400 joint_samples 61 [1047130, 511560] +processed_samples 20400 unjoint_samples 20400 joint_samples 62 [1045036, 63782] +processed_samples 20400 unjoint_samples 20400 joint_samples 61 [1047130, 511560] +processed_samples 20400 unjoint_samples 20400 joint_samples 62 [1044092, 188834] +processed_samples 20400 unjoint_samples 20400 joint_samples 61 [867857, 1045269] +processed_samples 20401 unjoint_samples 20400 joint_samples 60 [1032529, 874239] +processed_samples 20400 unjoint_samples 20400 joint_samples 61 [867857, 1045269] +processed_samples 20401 unjoint_samples 20400 joint_samples 60 [1032529, 874239] +processed_samples 20402 unjoint_samples 20400 joint_samples 60 [1024721, 776172] +processed_samples 20402 unjoint_samples 20400 joint_samples 63 [1040929, 859559] +processed_samples 20402 unjoint_samples 20400 joint_samples 60 [1024721, 776172] +processed_samples 20402 unjoint_samples 20400 joint_samples 63 [1040929, 859559] +[h264 @ 0x55cecd5f3f00] mmco: unref short failure +[h264 @ 0x556ca103f1c0] mmco: unref short failure +[h264 @ 0x55cecc7f9480] 
+[h264 @ 0x...] mmco: unref short failure    (FFmpeg h264 decoder warning, emitted continuously by many decoder instances throughout this excerpt)
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [65002, 1047631]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [566497, 1046734]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [1044092, 507821]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [65002, 1047631]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [1045036, 367773]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [1044092, 507821]
+processed_samples 20502 unjoint_samples 20500 joint_samples 64 [200626, 1032981]
+processed_samples 20502 unjoint_samples 20500 joint_samples 61 [1035094, 80832]
+processed_samples 20500 unjoint_samples 20500 joint_samples 61 [1047130, 889813]
+processed_samples 20501 unjoint_samples 20500 joint_samples 61 [109468, 1028336]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [1045036, 367773]
+processed_samples 20501 unjoint_samples 20500 joint_samples 61 [109468, 1028336]
+processed_samples 20500 unjoint_samples 20500 joint_samples 61 [1047130, 889813]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [566497, 1046734]
+processed_samples 20502 unjoint_samples 20500 joint_samples 64 [200626, 1032981]
+processed_samples 20502 unjoint_samples 20500 joint_samples 61 [1035094, 80832]
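Note on the condensed warnings above: "mmco: unref short failure" is a reference-picture bookkeeping complaint from FFmpeg's h264 decoder, typically caused by truncated or slightly non-conforming streams; decoding continues and the samples are still consumed, so in this log it is pure noise. If the video samples are decoded through PyAV, which is an assumption here since the data-loading code is not visible in this log, the libav log level can be lowered so the messages stop flooding stdout (the equivalent ffmpeg CLI switch is -loglevel fatal). A minimal sketch:

# Sketch only: assumes the loader decodes video via PyAV, which this log does not confirm.
import av            # PyAV, FFmpeg bindings
import av.logging

# Show only fatal libav messages; the per-frame "mmco: unref short failure"
# chatter from the h264 decoder is suppressed, decoding behaviour is unchanged.
av.logging.set_level(av.logging.FATAL)

def first_frames(path, max_frames=8):
    """Decode up to max_frames RGB frames from a video file."""
    frames = []
    with av.open(path) as container:
        for frame in container.decode(video=0):
            frames.append(frame.to_ndarray(format="rgb24"))
            if len(frames) >= max_frames:
                break
    return frames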
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [250839, 1031199]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [391373, 1047631]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [975629, 1046734]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [1045036, 768253]
+processed_samples 20602 unjoint_samples 20600 joint_samples 61 [1035094, 374146]
+processed_samples 20601 unjoint_samples 20600 joint_samples 61 [452610, 1028336]
+processed_samples 20602 unjoint_samples 20600 joint_samples 64 [575839, 1032981]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [1044092, 756207]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [250839, 1031199]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [391373, 1047631]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [1045036, 768253]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [1044092, 756207]
+processed_samples 20601 unjoint_samples 20600 joint_samples 61 [452610, 1028336]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [975629, 1046734]
+processed_samples 20602 unjoint_samples 20600 joint_samples 61 [1035094, 374146]
+processed_samples 20602 unjoint_samples 20600 joint_samples 64 [575839, 1032981]
+processed_samples 20700 unjoint_samples 20700 joint_samples 63 [213147, 1046734]
+processed_samples 20700 unjoint_samples 20700 joint_samples 63 [213147, 1046734]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [667492, 1047631]
+processed_samples 20700 unjoint_samples 20700 joint_samples 63 [159138, 1039965]
+processed_samples 20700 unjoint_samples 20700 joint_samples 63 [159138, 1039965]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [667492, 1047631]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [572680, 1031199]
+processed_samples 20702 unjoint_samples 20700 joint_samples 65 [1046954, 15711]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [572680, 1031199]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [1045036, 994711]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [1045036, 994711]
+processed_samples 20702 unjoint_samples 20700 joint_samples 65 [1046954, 15711]
+processed_samples 20702 unjoint_samples 20700 joint_samples 61 [1035094, 614190]
+processed_samples 20701 unjoint_samples 20700 joint_samples 61 [737129, 1028336]
+processed_samples 20702 unjoint_samples 20700 joint_samples 61 [1035094, 614190]
+processed_samples 20701 unjoint_samples 20700 joint_samples 61 [737129, 1028336]
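The processed_samples / joint_samples counters are the only progress signal in this stretch of the log; each step is apparently reported once per worker process, which is why every value appears several times. For offline monitoring they can be pulled straight out of the log. The parser below is only an illustrative sketch matching the literal line format shown above; the meaning of the bracketed number pair is not documented in this log and is kept as opaque values.

import re

# Matches lines such as:
#   processed_samples 20700 unjoint_samples 20700 joint_samples 62 [667492, 1047631]
PROGRESS_RE = re.compile(
    r"processed_samples (\d+) unjoint_samples (\d+) joint_samples (\d+) \[(\d+), (\d+)\]"
)

def iter_progress(log_path):
    """Yield (processed, unjoint, joint, (a, b)) for every progress line in the log."""
    with open(log_path, errors="ignore") as fh:
        for line in fh:
            m = PROGRESS_RE.search(line)
            if m:
                processed, unjoint, joint, a, b = map(int, m.groups())
                yield processed, unjoint, joint, (a, b)

if __name__ == "__main__":
    # Example: highest sample count reached in this node's log.
    print(max(p for p, _, _, _ in iter_progress("log_node23.txt")))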
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[Errno 19] No such device: 'data_2'
+[2024-12-01 22:44:51,094] torch.distributed.elastic.agent.server.api: [ERROR] Error waiting on exit barrier. Elapsed: 300.10807490348816 seconds
++ set +x
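The run ends with every worker process on this node hitting "[Errno 19] No such device" (ENODEV) for the data_2 volume, i.e. the storage that the outputs and checkpoints are written to disappeared out from under the job. Because the workers did not shut down cleanly together, the torchelastic agent then reports "Error waiting on exit barrier" after roughly 300 seconds, which matches the agent's default exit-barrier timeout. A defensive pre-flight check before each checkpoint save can turn this failure mode into an explicit, earlier error. The sketch below is generic; the path constant and the save_fn placeholder are hypothetical, not taken from the training script.

import os

CKPT_ROOT = "/data_2"   # hypothetical constant: the volume this job writes outputs to

def storage_usable(root=CKPT_ROOT):
    """True if the volume is still mounted and reports free space."""
    try:
        if not os.path.ismount(root):
            return False
        st = os.statvfs(root)   # raises OSError (e.g. ENODEV) once the device is gone
        return st.f_bavail * st.f_frsize > 0
    except OSError:
        return False

def save_checkpoint(save_fn, ckpt_dir):
    """Wrap an arbitrary checkpoint-save callable with a storage check."""
    if not storage_usable():
        raise RuntimeError(f"{CKPT_ROOT} is not mounted or not writable; refusing to save to {ckpt_dir}")
    save_fn(ckpt_dir)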