diff --git "a/log_node13.txt" "b/log_node13.txt"
new file mode 100644
--- /dev/null
+++ "b/log_node13.txt"
@@ -0,0 +1,34036 @@
++ echo Logging output to /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//log_node13.txt
+Logging output to /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//log_node13.txt
++ export ASCEND_PROCESS_LOG_PATH=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/13
++ ASCEND_PROCESS_LOG_PATH=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/13
++ mkdir -p /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//ascend/13
++ DATA_PATH=/local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml
++ TOKENIZER_PATH=/data_4/models/Qwen/Qwen2.5-14B-Instruct/
++ CKPT_LOAD_DIR=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/
++ VIT_CKPT_LOAD_DIR=/
++ CKPT_SAVE_DIR=/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743//
++ rsync -avh /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743/
+sending incremental file list
+
+sent 71 bytes received 12 bytes 166.00 bytes/sec
+total size is 23.84K speedup is 287.17
++ cd /local_disk/cognitron_vl/
++ rm -fr datasets
++ mkdir -p datasets
++ ln -s /data/data/ datasets/CV
++ ln -s /data/data/LLM datasets/LLM
++ ln -s /data/data/LMM datasets/LMM
++ source /local_disk/cognitron_vl//scripts/set_env_mg_npu.sh
+++ source /usr/local/Ascend/driver/bin/setenv.bash
++++ DEP_INFO_FILE=/etc/ascend_install.info
++++ [[ -f /etc/ascend_install.info ]]
++++ .
/etc/ascend_install.info ++++ DRV_LIB64_COMMON_LDPATH=/driver/lib64/common ++++ DRV_LIB64_DRV_LDPATH=/driver/lib64/driver ++++ DRV_LIB64_LDPATH=/driver/lib64 ++++ export LD_LIBRARY_PATH=/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ LD_LIBRARY_PATH=/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin ++++ PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin +++ source /usr/local/Ascend/ascend-toolkit/set_env.sh ++++ export LD_LIBRARY_PATH=/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ 
LD_LIBRARY_PATH=/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest ++++ ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest +++++ arch ++++ export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export 
LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/driver/lib64/common:/driver/lib64/driver:/driver/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64:/usr/local/Ascend/ascend-toolkit/latest/tools/aml/lib64/plugin:/usr/local/Ascend/ascend-toolkit/latest/lib64:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/opskernel:/usr/local/Ascend/ascend-toolkit/latest/lib64/plugin/nnengine:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64:/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/local/Ascend/driver/lib64/common:/usr/local/Ascend/driver/lib64/driver:/usr/lib/x86_64-linux-gnu/hdf5/serial: ++++ export PYTHONPATH=/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: ++++ PYTHONPATH=/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: ++++ export PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin ++++ 
PATH=/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/bin:/usr/local/Ascend/ascend-toolkit/latest/compiler/ccec_compiler/bin:/usr/local/Ascend/ascend-toolkit/latest/tools/ccec_compiler/bin:/root/miniconda3/envs/py38/bin:/root/miniconda3/condabin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin ++++ export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest ++++ ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest ++++ export ASCEND_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp ++++ ASCEND_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp ++++ export TOOLCHAIN_HOME=/usr/local/Ascend/ascend-toolkit/latest/toolkit ++++ TOOLCHAIN_HOME=/usr/local/Ascend/ascend-toolkit/latest/toolkit ++++ export ASCEND_HOME_PATH=/usr/local/Ascend/ascend-toolkit/latest ++++ ASCEND_HOME_PATH=/usr/local/Ascend/ascend-toolkit/latest +++ export HCCL_CONNECT_TIMEOUT=7200 +++ HCCL_CONNECT_TIMEOUT=7200 +++ export HCCL_EXEC_TIMEOUT=7200 +++ HCCL_EXEC_TIMEOUT=7200 +++ export COMBINED_ENABLE=1 +++ COMBINED_ENABLE=1 +++ export MULTI_STREAM_MEMORY_REUSE=1 +++ MULTI_STREAM_MEMORY_REUSE=1 +++ export HCCL_RDMA_TC=160 +++ HCCL_RDMA_TC=160 +++ export HCCL_RDMA_SL=5 +++ HCCL_RDMA_SL=5 +++ export HCCL_INTRA_PCIE_ENABLE=0 +++ HCCL_INTRA_PCIE_ENABLE=0 +++ export HCCL_INTRA_ROCE_ENABLE=1 +++ HCCL_INTRA_ROCE_ENABLE=1 +++ export HCCL_RDMA_TIMEOUT=20 +++ HCCL_RDMA_TIMEOUT=20 +++ export INF_NAN_MODE_ENABLE=1 +++ INF_NAN_MODE_ENABLE=1 +++ export DISTRIBUTED_BACKEND=hccl +++ DISTRIBUTED_BACKEND=hccl +++ export ASCEND_LAUNCH_BLOCKING=0 +++ ASCEND_LAUNCH_BLOCKING=0 +++ export ASCEND_SLOG_PRINT_TO_STDOUT=0 +++ ASCEND_SLOG_PRINT_TO_STDOUT=0 +++ export ASCEND_GLOBAL_LOG_LEVEL=3 +++ ASCEND_GLOBAL_LOG_LEVEL=3 +++ export ASCEND_GLOBAL_EVENT_ENABLE=0 +++ ASCEND_GLOBAL_EVENT_ENABLE=0 +++ export TASK_QUEUE_ENABLE=1 +++ TASK_QUEUE_ENABLE=1 +++ export PTCOPY_ENABLE=1 +++ PTCOPY_ENABLE=1 +++ export COMBINED_ENABLE=1 +++ COMBINED_ENABLE=1 +++ export DYNAMIC_OP=ADD#MUL +++ DYNAMIC_OP=ADD#MUL +++ export HCCL_WHITELIST_DISABLE=1 +++ HCCL_WHITELIST_DISABLE=1 +++ export HCCL_CONNECT_TIMEOUT=7200 +++ HCCL_CONNECT_TIMEOUT=7200 +++ export HCCL_WHITELIST_DISABLE=1 +++ HCCL_WHITELIST_DISABLE=1 +++ export CUDA_DEVICE_MAX_CONNECTIONS=1 +++ CUDA_DEVICE_MAX_CONNECTIONS=1 +++ pip3 install --no-index --find-links=/data/software/ -r requirements_npu.txt +Looking in links: /data/software/ +Processing data/software/expecttest-0.2.1-py3-none-any.whl (from -r requirements_npu.txt (line 1)) +Requirement already satisfied: peft in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 2)) (0.7.0) +Processing data/software/XlsxWriter-3.2.0-py3-none-any.whl (from -r requirements_npu.txt (line 3)) +Requirement already satisfied: termcolor in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 4)) (2.4.0) +Requirement already satisfied: tabulate in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 5)) (0.9.0) +Processing data/software/tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from -r requirements_npu.txt (line 6)) +Requirement already satisfied: matplotlib in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 7)) (3.7.5) +Processing 
data/software/datasets-3.0.0-py3-none-any.whl (from -r requirements_npu.txt (line 8)) +Requirement already satisfied: einops in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 9)) (0.7.0) +Processing data/software/pybind11-2.13.6-py3-none-any.whl (from -r requirements_npu.txt (line 10)) +Requirement already satisfied: tensorboardX in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 11)) (2.6.2.2) +Processing data/software/pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from -r requirements_npu.txt (line 12)) +Requirement already satisfied: transformers>=4.40.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 13)) (4.40.1) +Requirement already satisfied: deepspeed>=0.14.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 14)) (0.14.5) +Processing data/software/accelerate-0.34.2-py3-none-any.whl (from -r requirements_npu.txt (line 15)) +Requirement already satisfied: timm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from -r requirements_npu.txt (line 16)) (0.9.16) +Processing data/software/flask-3.0.3-py3-none-any.whl (from -r requirements_npu.txt (line 17)) +Processing data/software/Flask_RESTful-0.3.10-py2.py3-none-any.whl (from -r requirements_npu.txt (line 18)) +Processing data/software/decord-0.6.0-py3-none-manylinux2010_x86_64.whl (from -r requirements_npu.txt (line 19)) +Processing data/software/natsort-8.4.0-py3-none-any.whl (from -r requirements_npu.txt (line 20)) +Requirement already satisfied: numpy>=1.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (1.24.4) +Requirement already satisfied: packaging>=20.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (23.2) +Requirement already satisfied: psutil in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (5.9.8) +Requirement already satisfied: pyyaml in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (5.4.1) +Requirement already satisfied: torch>=1.13.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (2.1.0+cpu) +Requirement already satisfied: tqdm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (4.66.2) +Requirement already satisfied: safetensors in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (0.4.2) +Requirement already satisfied: huggingface-hub>=0.17.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft->-r requirements_npu.txt (line 2)) (0.20.3) +Requirement already satisfied: regex>=2022.1.18 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tiktoken->-r requirements_npu.txt (line 6)) (2023.12.25) +Requirement already satisfied: requests>=2.26.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tiktoken->-r requirements_npu.txt (line 6)) (2.31.0) +Requirement already satisfied: contourpy>=1.0.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (1.1.1) +Requirement already satisfied: cycler>=0.10 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (0.12.1) +Requirement already 
satisfied: fonttools>=4.22.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (4.49.0) +Requirement already satisfied: kiwisolver>=1.0.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (1.4.5) +Requirement already satisfied: pillow>=6.2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (10.2.0) +Requirement already satisfied: pyparsing>=2.3.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (3.1.1) +Requirement already satisfied: python-dateutil>=2.7 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (2.8.2) +Requirement already satisfied: importlib-resources>=3.2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from matplotlib->-r requirements_npu.txt (line 7)) (6.1.2) +Requirement already satisfied: filelock in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.13.1) +Requirement already satisfied: dill<0.3.9,>=0.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (0.3.7) +Requirement already satisfied: pandas in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2.0.3) +Processing data/software/requests-2.32.3-py3-none-any.whl (from tiktoken->-r requirements_npu.txt (line 6)) +Processing data/software/tqdm-4.67.1-py3-none-any.whl (from peft->-r requirements_npu.txt (line 2)) +Requirement already satisfied: xxhash in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.4.1) +Requirement already satisfied: multiprocess in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (0.70.15) +Requirement already satisfied: fsspec<=2024.6.1,>=2023.1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from fsspec[http]<=2024.6.1,>=2023.1.0->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2023.10.0) +Requirement already satisfied: aiohttp in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets>=2.21.0->-r requirements_npu.txt (line 8)) (3.9.3) +Processing data/software/huggingface_hub-0.26.2-py3-none-any.whl (from peft->-r requirements_npu.txt (line 2)) +Requirement already satisfied: protobuf>=3.20 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from tensorboardX->-r requirements_npu.txt (line 11)) (4.25.3) +Requirement already satisfied: tokenizers<0.20,>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers>=4.40.1->-r requirements_npu.txt (line 13)) (0.19.1) +Requirement already satisfied: hjson in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (3.1.0) +Requirement already satisfied: ninja in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (1.11.1.1) +Requirement already satisfied: nvidia-ml-py in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (12.560.30) +Requirement already satisfied: py-cpuinfo in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt 
(line 14)) (9.0.0) +Requirement already satisfied: pydantic in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from deepspeed>=0.14.2->-r requirements_npu.txt (line 14)) (1.10.15) +Processing data/software/safetensors-0.4.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from peft->-r requirements_npu.txt (line 2)) +Requirement already satisfied: torchvision in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from timm->-r requirements_npu.txt (line 16)) (0.16.0) +Requirement already satisfied: Werkzeug>=3.0.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (3.0.1) +Requirement already satisfied: Jinja2>=3.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (3.1.3) +Processing data/software/itsdangerous-2.2.0-py3-none-any.whl (from flask->-r requirements_npu.txt (line 17)) +Requirement already satisfied: click>=8.1.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (8.1.7) +Processing data/software/blinker-1.8.2-py3-none-any.whl (from flask->-r requirements_npu.txt (line 17)) +Requirement already satisfied: importlib-metadata>=3.6.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask->-r requirements_npu.txt (line 17)) (7.0.1) +Processing data/software/aniso8601-9.0.1-py2.py3-none-any.whl (from flask_restful->-r requirements_npu.txt (line 18)) +Requirement already satisfied: six>=1.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask_restful->-r requirements_npu.txt (line 18)) (1.16.0) +Requirement already satisfied: pytz in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from flask_restful->-r requirements_npu.txt (line 18)) (2024.1) +Requirement already satisfied: aiosignal>=1.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.3.1) +Requirement already satisfied: attrs>=17.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (23.2.0) +Requirement already satisfied: frozenlist>=1.1.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.4.1) +Requirement already satisfied: multidict<7.0,>=4.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (6.0.5) +Requirement already satisfied: yarl<2.0,>=1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (1.9.4) +Requirement already satisfied: async-timeout<5.0,>=4.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (4.0.3) +Requirement already satisfied: typing-extensions>=3.7.4.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from huggingface-hub>=0.17.0->peft->-r requirements_npu.txt (line 2)) (4.10.0) +Requirement already satisfied: zipp>=0.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from importlib-metadata>=3.6.0->flask->-r requirements_npu.txt (line 17)) (3.17.0) +Requirement already satisfied: MarkupSafe>=2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from Jinja2>=3.1.2->flask->-r requirements_npu.txt (line 17)) (2.1.5) +Requirement already satisfied: charset-normalizer<4,>=2 in 
/root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (3.3.2) +Requirement already satisfied: idna<4,>=2.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (3.6) +Requirement already satisfied: urllib3<3,>=1.21.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (1.26.18) +Requirement already satisfied: certifi>=2017.4.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests>=2.26.0->tiktoken->-r requirements_npu.txt (line 6)) (2024.2.2) +Requirement already satisfied: sympy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (1.4) +Requirement already satisfied: networkx in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (3.1) +Requirement already satisfied: tzdata>=2022.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets>=2.21.0->-r requirements_npu.txt (line 8)) (2024.1) +Requirement already satisfied: mpmath>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from sympy->torch>=1.13.0->peft->-r requirements_npu.txt (line 2)) (1.3.0) +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: aniso8601, xlsxwriter, tqdm, safetensors, requests, pybind11, pyarrow, natsort, itsdangerous, expecttest, decord, blinker, tiktoken, huggingface-hub, flask, flask_restful, accelerate, datasets + Attempting uninstall: tqdm + Found existing installation: tqdm 4.66.2 + Uninstalling tqdm-4.66.2: + Successfully uninstalled tqdm-4.66.2 + Attempting uninstall: safetensors + Found existing installation: safetensors 0.4.2 + Uninstalling safetensors-0.4.2: + Successfully uninstalled safetensors-0.4.2 + Attempting uninstall: requests + Found existing installation: requests 2.31.0 + Uninstalling requests-2.31.0: + Successfully uninstalled requests-2.31.0 + Attempting uninstall: pyarrow + Found existing installation: pyarrow 15.0.0 + Uninstalling pyarrow-15.0.0: + Successfully uninstalled pyarrow-15.0.0 + Attempting uninstall: huggingface-hub + Found existing installation: huggingface-hub 0.20.3 + Uninstalling huggingface-hub-0.20.3: + Successfully uninstalled huggingface-hub-0.20.3 + Attempting uninstall: accelerate + Found existing installation: accelerate 0.25.0 + Uninstalling accelerate-0.25.0: + Successfully uninstalled accelerate-0.25.0 + Attempting uninstall: datasets + Found existing installation: datasets 2.16.0 + Uninstalling datasets-2.16.0: + Successfully uninstalled datasets-2.16.0 +ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. +tikit 1.8.2.240926 requires dicttoxml==1.7.4, which is not installed. +tikit 1.8.2.240926 requires docopt==0.6.2, which is not installed. +tikit 1.8.2.240926 requires future==0.18.2, which is not installed. +tikit 1.8.2.240926 requires hdfs==2.6.0, which is not installed. 
+tikit 1.8.2.240926 requires pure-sasl==0.6.2, which is not installed. +tikit 1.8.2.240926 requires py4j==0.10.7, which is not installed. +tikit 1.8.2.240926 requires PyHive[hive]==0.6.4, which is not installed. +tikit 1.8.2.240926 requires pyjwt>=2.4.0, which is not installed. +tikit 1.8.2.240926 requires requests-kerberos>=0.14.0, which is not installed. +tikit 1.8.2.240926 requires sasl==0.3.1, which is not installed. +tikit 1.8.2.240926 requires thrift==0.15.0, which is not installed. +tikit 1.8.2.240926 requires thrift-sasl>=0.1.0, which is not installed. +tikit 1.8.2.240926 requires certifi==2021.10.8, but you have certifi 2024.2.2 which is incompatible. +tikit 1.8.2.240926 requires cos-python-sdk-v5==1.9.29, but you have cos-python-sdk-v5 1.9.26 which is incompatible. +tikit 1.8.2.240926 requires idna==3.3, but you have idna 3.6 which is incompatible. +tikit 1.8.2.240926 requires prettytable==2.5.0, but you have prettytable 3.11.0 which is incompatible. +tikit 1.8.2.240926 requires urllib3==1.26.7, but you have urllib3 1.26.18 which is incompatible. +tikit 1.8.2.240926 requires wcwidth==0.2.5, but you have wcwidth 0.2.13 which is incompatible. +Successfully installed accelerate-0.34.2 aniso8601-9.0.1 blinker-1.8.2 datasets-3.0.0 decord-0.6.0 expecttest-0.2.1 flask-3.0.3 flask_restful-0.3.10 huggingface-hub-0.26.2 itsdangerous-2.2.0 natsort-8.4.0 pyarrow-17.0.0 pybind11-2.13.6 requests-2.32.3 safetensors-0.4.5 tiktoken-0.7.0 tqdm-4.67.1 xlsxwriter-3.2.0 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv +++ return 0 ++ MEGATRON_DIR=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0/ ++ MINDSPEED_DIR=/local_disk/cognitron_vl//third_party/MindSpeed_core_r0.6.0/ ++ MODELLINK_DIR=/local_disk/cognitron_vl//third_party/ModelLink/ ++ pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0/ +Looking in links: /data/software/ +Obtaining file://local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0 + Installing build dependencies: started + Installing build dependencies: finished with status 'done' + Checking if build backend supports build_editable: started + Checking if build backend supports build_editable: finished with status 'done' + Getting requirements to build editable: started + Getting requirements to build editable: finished with status 'done' + Installing backend dependencies: started + Installing backend dependencies: finished with status 'done' + Preparing editable metadata (pyproject.toml): started + Preparing editable metadata (pyproject.toml): finished with status 'done' +Building wheels for collected packages: megatron_core + Building editable for megatron_core (pyproject.toml): started + Building editable for megatron_core (pyproject.toml): finished with status 'done' + Created wheel for megatron_core: filename=megatron_core-0.6.0-0.editable-cp38-cp38-linux_x86_64.whl size=8791 sha256=d2626e666003b09b79922efa408f8415010dc56c2b30b71baf87359e94179578 + Stored in directory: /tmp/pip-ephem-wheel-cache-fjc9qifk/wheels/54/9c/d1/d2015aa0c34e791e64d65d19395e5a9a5528f0c63fd519b9ff +Successfully built megatron_core +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. 
A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: megatron_core +Successfully installed megatron_core-0.6.0 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ++ pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/MindSpeed_core_r0.6.0/ +Looking in links: /data/software/ +Obtaining file://local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0 + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +WARNING: Error parsing requirements for tokenizers: [Errno 2] No such file or directory: '/root/miniconda3/envs/py38/lib/python3.8/site-packages/tokenizers-0.19.1.dist-info/METADATA' +WARNING: Error parsing requirements for transformers: [Errno 2] No such file or directory: '/root/miniconda3/envs/py38/lib/python3.8/site-packages/transformers-4.40.1.dist-info/METADATA' +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: mindspeed + Running setup.py develop for mindspeed +Successfully installed mindspeed-0.6.0 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ++ pip3 install --no-index --find-links=/data/software/ -e /local_disk/cognitron_vl//third_party/ModelLink/ +Looking in links: /data/software/ +Obtaining file://local_disk/cognitron_vl/third_party/ModelLink + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +Requirement already satisfied: numpy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.24.4) +Processing data/software/transformers-4.43.2-py3-none-any.whl (from modellink==0.0.1) +Processing data/software/transformers-stream-generator-0.0.5.tar.gz (from modellink==0.0.1) + Preparing metadata (setup.py): started + Preparing metadata (setup.py): finished with status 'done' +Requirement already satisfied: sympy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.4) +Requirement already satisfied: decorator in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (5.1.1) +Requirement already satisfied: scipy in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.10.1) +Requirement already satisfied: sentencepiece in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.2.0) +Requirement already satisfied: einops in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.7.0) +Requirement already satisfied: datasets in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (3.0.0) +Requirement already satisfied: pybind11 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (2.13.6) +Requirement already satisfied: accelerate in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.34.2) +Requirement already satisfied: six in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (1.16.0) +Requirement already satisfied: protobuf in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (4.25.3) +Processing data/software/peft-0.7.1-py3-none-any.whl (from modellink==0.0.1) +Requirement already satisfied: tiktoken in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from modellink==0.0.1) (0.7.0) +Requirement already satisfied: packaging>=20.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (23.2) +Requirement already satisfied: psutil in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (5.9.8) +Requirement already satisfied: pyyaml in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (5.4.1) +Requirement already satisfied: torch>=1.13.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (2.1.0+cpu) +Requirement already satisfied: tqdm in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (4.67.1) +Requirement already satisfied: safetensors in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (0.4.5) +Requirement already satisfied: huggingface-hub>=0.17.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from peft==0.7.1->modellink==0.0.1) (0.26.2) +Requirement already satisfied: filelock in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (3.13.1) +Requirement already satisfied: 
regex!=2019.12.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (2023.12.25) +Requirement already satisfied: requests in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from transformers==4.43.2->modellink==0.0.1) (2.32.3) +Processing data/software/tokenizers-0.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (from transformers==4.43.2->modellink==0.0.1) +Requirement already satisfied: pyarrow>=15.0.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (17.0.0) +Requirement already satisfied: dill<0.3.9,>=0.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (0.3.7) +Requirement already satisfied: pandas in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (2.0.3) +Requirement already satisfied: xxhash in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (3.4.1) +Requirement already satisfied: multiprocess in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (0.70.15) +Requirement already satisfied: fsspec<=2024.6.1,>=2023.1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from fsspec[http]<=2024.6.1,>=2023.1.0->datasets->modellink==0.0.1) (2023.10.0) +Requirement already satisfied: aiohttp in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from datasets->modellink==0.0.1) (3.9.3) +Requirement already satisfied: mpmath>=0.19 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from sympy->modellink==0.0.1) (1.3.0) +Requirement already satisfied: aiosignal>=1.1.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.3.1) +Requirement already satisfied: attrs>=17.3.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (23.2.0) +Requirement already satisfied: frozenlist>=1.1.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.4.1) +Requirement already satisfied: multidict<7.0,>=4.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (6.0.5) +Requirement already satisfied: yarl<2.0,>=1.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (1.9.4) +Requirement already satisfied: async-timeout<5.0,>=4.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from aiohttp->datasets->modellink==0.0.1) (4.0.3) +Requirement already satisfied: typing-extensions>=3.7.4.3 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from huggingface-hub>=0.17.0->peft==0.7.1->modellink==0.0.1) (4.10.0) +Requirement already satisfied: charset-normalizer<4,>=2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (3.3.2) +Requirement already satisfied: idna<4,>=2.5 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (3.6) +Requirement already satisfied: urllib3<3,>=1.21.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (1.26.18) +Requirement already satisfied: certifi>=2017.4.17 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from requests->transformers==4.43.2->modellink==0.0.1) (2024.2.2) +Requirement already satisfied: networkx in 
/root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (3.1) +Requirement already satisfied: jinja2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (3.1.3) +Requirement already satisfied: python-dateutil>=2.8.2 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2.8.2) +Requirement already satisfied: pytz>=2020.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2024.1) +Requirement already satisfied: tzdata>=2022.1 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from pandas->datasets->modellink==0.0.1) (2024.1) +Requirement already satisfied: MarkupSafe>=2.0 in /root/miniconda3/envs/py38/lib/python3.8/site-packages (from jinja2->torch>=1.13.0->peft==0.7.1->modellink==0.0.1) (2.1.5) +Building wheels for collected packages: transformers_stream_generator + Building wheel for transformers_stream_generator (setup.py): started + Building wheel for transformers_stream_generator (setup.py): finished with status 'done' + Created wheel for transformers_stream_generator: filename=transformers_stream_generator-0.0.5-py3-none-any.whl size=12425 sha256=8ba850436130d2f5021790b66e81977246abf57ffa083df74963c9acfa63ad6c + Stored in directory: /root/.cache/pip/wheels/56/8c/42/5381d9c36bc85f28982f4cf8f98dc44d37a6d6c04897a5cb7c +Successfully built transformers_stream_generator +DEPRECATION: apex 0.1-ascend-20240523 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of apex or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063 +Installing collected packages: tokenizers, transformers, transformers_stream_generator, peft, modellink + Attempting uninstall: tokenizers + Found existing installation: tokenizers 0.20.3 + Uninstalling tokenizers-0.20.3: + Successfully uninstalled tokenizers-0.20.3 + Attempting uninstall: transformers + Found existing installation: transformers 4.46.3 + Uninstalling transformers-4.46.3: + Successfully uninstalled transformers-4.46.3 + Attempting uninstall: peft + Found existing installation: peft 0.7.0 + Uninstalling peft-0.7.0: + Successfully uninstalled peft-0.7.0 + Running setup.py develop for modellink +Successfully installed modellink-0.0.1 peft-0.7.1 tokenizers-0.19.1 transformers-4.43.2 transformers_stream_generator-0.0.5 +WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv ++ export PYTHONPATH=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0//:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: ++ PYTHONPATH=/local_disk/cognitron_vl//third_party/Megatron-LM_core_r0.6.0//:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe:/usr/local/Ascend/ascend-toolkit/latest/python/site-packages:/usr/local/Ascend/ascend-toolkit/latest/opp/built-in/op_impl/ai_core/tbe: ++ GPUS_PER_NODE=16 ++ NNODES=32 ++ NODE_RANK=13 ++ MASTER_PORT=34567 ++ export CUDA_DEVICE_MAX_CONNECTIONS=1 ++ CUDA_DEVICE_MAX_CONNECTIONS=1 ++ export PYTORCH_NPU_ALLOC_CONF=expandable_segments:True ++ PYTORCH_NPU_ALLOC_CONF=expandable_segments:True ++ VISION_SEQ_LENGTH=1025 ++ IMAGE_TOKEN_LENGTH=256 ++ IMAGE_SIZE=448 ++ VISION_MODEL_TYPE=intern_300m ++ TP=8 ++ PP=1 ++ CP=8 ++ CP_ALGO=megatron_cp_algo ++ CP_MASK=causal ++ DISTRIBUTED_ARGS=' + --nproc_per_node 16 --nnodes 32 --node_rank 13 --master_addr train-1198772881325351168-93vlj4s2getc-master-0.train-100034032793.svc.cluster.local --master_port 34567 +' ++ GPT_ARGS=' + --use-mcore-models --tensor-model-parallel-size 8 --pipeline-model-parallel-size 1 --context-parallel-size 8 --context-parallel-algo megatron_cp_algo --cp-attention-mask-type causal --use-cp-send-recv-overlap --no-create-attention-mask-in-dataloader --sparse-mode 4 --sequence-parallel --recompute-method block --recompute-granularity full --recompute-num-layers 48 --num-layers 48 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --group-query-attention --num-query-groups 8 --tokenizer-type PretrainedFromHF --tokenizer-name-or-path /data_4/models/Qwen/Qwen2.5-14B-Instruct/ --seq-length 1048576 --max-position-embeddings 1048576 --micro-batch-size 1 --global-batch-size 8 --make-vocab-size-divisible-by 1 --padded-vocab-size 152064 --rotary-base 1000000.0 --lr 5.00e-6 --train-iters 500 --lr-decay-style cosine --untie-embeddings-and-output-weights --disable-bias-linear --attention-dropout 0.0 --init-method-std 0.01 --hidden-dropout 0.0 --position-embedding-type rope --normalization RMSNorm --use-fused-rmsnorm --norm-epsilon 1e-6 --swiglu --use-flash-attn --use-fused-rotary-pos-emb --use-rotary-position-embeddings --use-fused-swiglu --use-mc2 --no-masked-softmax-fusion --attention-softmax-in-fp32 --min-lr 1.00e-7 --weight-decay 0.0 --lr-warmup-fraction 0.03 --clip-grad 1.0 --adam-beta1 0.9 --adam-beta2 0.999 --add-qkv-bias --initial-loss-scale 4096 --no-gradient-accumulation-fusion --use-distributed-optimizer --bf16 --overlap-grad-reduce --finetune --vision-model-freeze --vision-model-type intern_300m --vision-downsample-ratio 0.5 --vision-projector-type mlp --vision-projector-pre-norm --vision-process-type dynamic --vision-normalize-type imagenet --vision-seq-length 1025 --image-token-length 256 --image-size 448 --prompt-format qwen2 --is-instruction-dataset --max-num-image 4096 --max-fps 1 --add-class-token --min-patch-grid 1 --max-patch-grid 12 --logit-mask --cross-dataset-joint ' ++ DATA_ARGS=' + --data-path /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml --split 100,0,0 --data-seq-length 1048576 --num-workers 8 ' ++ CKPT_ARGS=' + --load 
/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/ --vit-load / --no-load-optim --no-load-rng --seed 42424242 --save /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743// ' ++ OUTPUT_ARGS=' + --log-interval 1 --save-interval 20 --eval-interval 20 --eval-iters 0 --log-throughput --distributed-timeout-minutes 120 ' ++ torchrun --nproc_per_node 16 --nnodes 32 --node_rank 13 --master_addr train-1198772881325351168-93vlj4s2getc-master-0.train-100034032793.svc.cluster.local --master_port 34567 /local_disk/cognitron_vl//lcvlm_modellink/pretrain_lcvlm.py --use-mcore-models --tensor-model-parallel-size 8 --pipeline-model-parallel-size 1 --context-parallel-size 8 --context-parallel-algo megatron_cp_algo --cp-attention-mask-type causal --use-cp-send-recv-overlap --no-create-attention-mask-in-dataloader --sparse-mode 4 --sequence-parallel --recompute-method block --recompute-granularity full --recompute-num-layers 48 --num-layers 48 --hidden-size 5120 --ffn-hidden-size 13824 --num-attention-heads 40 --group-query-attention --num-query-groups 8 --tokenizer-type PretrainedFromHF --tokenizer-name-or-path /data_4/models/Qwen/Qwen2.5-14B-Instruct/ --seq-length 1048576 --max-position-embeddings 1048576 --micro-batch-size 1 --global-batch-size 8 --make-vocab-size-divisible-by 1 --padded-vocab-size 152064 --rotary-base 1000000.0 --lr 5.00e-6 --train-iters 500 --lr-decay-style cosine --untie-embeddings-and-output-weights --disable-bias-linear --attention-dropout 0.0 --init-method-std 0.01 --hidden-dropout 0.0 --position-embedding-type rope --normalization RMSNorm --use-fused-rmsnorm --norm-epsilon 1e-6 --swiglu --use-flash-attn --use-fused-rotary-pos-emb --use-rotary-position-embeddings --use-fused-swiglu --use-mc2 --no-masked-softmax-fusion --attention-softmax-in-fp32 --min-lr 1.00e-7 --weight-decay 0.0 --lr-warmup-fraction 0.03 --clip-grad 1.0 --adam-beta1 0.9 --adam-beta2 0.999 --add-qkv-bias --initial-loss-scale 4096 --no-gradient-accumulation-fusion --use-distributed-optimizer --bf16 --overlap-grad-reduce --finetune --vision-model-freeze --vision-model-type intern_300m --vision-downsample-ratio 0.5 --vision-projector-type mlp --vision-projector-pre-norm --vision-process-type dynamic --vision-normalize-type imagenet --vision-seq-length 1025 --image-token-length 256 --image-size 448 --prompt-format qwen2 --is-instruction-dataset --max-num-image 4096 --max-fps 1 --add-class-token --min-patch-grid 1 --max-patch-grid 12 --logit-mask --cross-dataset-joint --data-path /local_disk/cognitron_vl//configs/lcvlm_finetune_stage4.yaml --split 100,0,0 --data-seq-length 1048576 --num-workers 8 --load /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/ --vit-load / --no-load-optim --no-load-rng --seed 42424242 --save /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp8_stage4.sh/20241128_234743// --log-interval 1 --save-interval 20 --eval-interval 20 --eval-iters 0 --log-throughput --distributed-timeout-minutes 120 --distributed-backend nccl +[2024-11-28 15:50:29,343] torch.distributed.run: [WARNING] +[2024-11-28 15:50:29,343] torch.distributed.run: [WARNING] ***************************************** +[2024-11-28 15:50:29,343] torch.distributed.run: [WARNING] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune 
the variable for optimal performance in your application as needed. +[2024-11-28 15:50:29,343] torch.distributed.run: [WARNING] ***************************************** +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root...Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... + +Creating extension directory /root/.cache/torch_extensions/py38_cpu/adaptive_cp... +Creating extension directory /root/.cache/torch_extensions/py38_cpu/adaptive_cp... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Emitting ninja build file /root/.cache/torch_extensions/py38_cpu/adaptive_cp/build.ninja... +Building extension module adaptive_cp... +Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N) +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +Using /root/.cache/torch_extensions/py38_cpu as PyTorch extensions root... +[1/2] c++ -MMD -MF adaptive_cp.o.d -DTORCH_EXTENSION_NAME=adaptive_cp -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -I/usr/local/Ascend/ascend-toolkit/latest/include -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/include -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/third_party -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/acl -I/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/inc -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/TH -isystem /root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/include/THC -isystem /root/miniconda3/envs/py38/include/python3.8 -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++17 -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack -fPIC -pie -Wl,--disable-new-dtags,--rpath -s -O2 -c local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/ops/csrc/algorithm/adaptive_cp/adaptive_cp.cpp -o adaptive_cp.o +[2/2] c++ adaptive_cp.o -shared -L/usr/local/Ascend/ascend-toolkit/latest/lib64 -lascendcl -L/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch_npu/lib -ltorch_npu -L/root/miniconda3/envs/py38/lib/python3.8/site-packages/torch/lib -lc10 -ltorch_cpu -ltorch -ltorch_python -o adaptive_cp.so +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... 
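For orientation, here is a minimal shell sketch of how the parallel sizes in the torchrun launch above fit together. The variable names mirror the ones traced earlier in this log (GPUS_PER_NODE=16, NNODES=32, TP=8, PP=1, CP=8); the data-parallel arithmetic is standard Megatron-style bookkeeping and is not something the script itself prints:

# a sketch, not part of the original log: how the launch geometry works out
GPUS_PER_NODE=16                         # NPUs per node, as set above
NNODES=32                                # nodes in the job
TP=8; PP=1; CP=8                         # tensor / pipeline / context parallel sizes
WORLD_SIZE=$((GPUS_PER_NODE * NNODES))   # 512 ranks launched by torchrun overall
MODEL_RANKS=$((TP * PP * CP))            # 64 ranks hold one model replica
DP=$((WORLD_SIZE / MODEL_RANKS))         # 8 data-parallel replicas
# with --micro-batch-size 1 and --global-batch-size 8, that leaves
# 8 / (1 * 8) = 1 micro-batch per replica per step, i.e. no gradient accumulation
echo "world=${WORLD_SIZE} model_ranks=${MODEL_RANKS} dp=${DP}"

The 1,048,576-token --seq-length is what motivates CP=8 here: context parallelism splits the sequence across ranks, so each context-parallel rank holds roughly 1/8 of the sequence activations (about 131,072 tokens).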
+local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... 
+local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +Loading extension module adaptive_cp... +Loading extension module adaptive_cp... +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +local_disk/cognitron_vl/third_party/MindSpeed_core_r0.6.0/mindspeed/core/tensor_parallel/layers.py:30: UserWarning: failed to generate the npu_matmul_add_fp32 + warnings.warn("failed to generate the npu_matmul_add_fp32") +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? 
+ warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? 
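The torchvision notice repeated above (and a few more times below) is environmental: this torchvision build tries to load its image extension against CUDA's libc10_cuda.so, which does not exist on the NPU host. It is harmless as long as image decoding through torchvision.io is not used. If the repetition is unwanted, a filter installed before torchvision is first imported will hide it; a sketch:

    import warnings

    # Must run before `import torchvision` (the warning fires at import time).
    warnings.filterwarnings(
        "ignore",
        message="Failed to load image Python extension",
        category=UserWarning,
    )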
+ warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +/root/miniconda3/envs/py38/lib/python3.8/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'libc10_cuda.so: cannot open shared object file: No such file or directory'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source? + warn( +> compiling dataset index builder ... +make: Entering directory 'local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory 'local_disk/cognitron_vl/third_party/Megatron-LM_core_r0.6.0/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.073 seconds +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_projector_recompute False +vision_model_freeze +=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. +=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
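The "=> set param ... requires grad to False." block that starts here (and continues for every ViT parameter below) is the vision tower being frozen for this stage. A minimal sketch of a loop that produces output in exactly this format, assuming the freeze is keyed on the "external_feature_model.vit" name prefix seen in the log (the real helper in the training code may differ):

    def freeze_vision_model(model, prefix="external_feature_model.vit"):
        # Freeze every parameter of the vision tower and echo each change
        # in the same "=> set param <name> <shape> requires grad to False." format.
        print("vision_model_freeze")
        for name, param in model.named_parameters():
            if name.startswith(prefix):
                param.requires_grad = False
                print(f"=> set param {name} {param.shape} requires grad to False.")

    # usage (hypothetical): freeze_vision_model(vlm_model)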
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +vision_model_freeze +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. + +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+vision_model_freeze
+=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+[identical "=> set param ... requires grad to False." messages follow for external_feature_model.vit.decoder.layers.1 through layers.23 with the same shapes; every rank on the node prints the same freeze block, so the raw per-rank streams are interleaved]
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+ +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.vision_model_freeze=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+ + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+ + +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +vision_model_freeze +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. + + + + + + +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False.vision_model_freeze=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + + + + +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+vision_model_freeze
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+[... the same set of "=> set param external_feature_model.vit.decoder.layers.N.* ... requires grad to False." messages for the remaining decoder layers (layers 0-18 appear in this portion of the log; layer 9 is shown above as a representative), printed by every rank on the node and interleaved in the raw output ...]
+ + + +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + + + + +=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
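+The freeze messages above show the vision tower being locked for this stage-4 run: every InternViT decoder parameter has requires_grad set to False. Below is a minimal sketch of the kind of loop that produces such lines; the helper name freeze_vision_tower and the name prefix are illustrative assumptions, not the actual ModelLink code.
+
+    import torch.nn as nn
+
+    def freeze_vision_tower(model: nn.Module, prefix: str = "external_feature_model.vit.") -> None:
+        """Disable gradients for every ViT parameter and echo one log line per tensor."""
+        for name, param in model.named_parameters():
+            if name.startswith(prefix):
+                param.requires_grad = False
+                # param.shape prints as torch.Size([...]), matching the log format above
+                print(f"=> set param {name} {param.shape} requires grad to False.")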
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
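+Given the module tree above, a quick sanity check on the freeze is to tally parameters by requires_grad and confirm how much of the model remains trainable after the ViT is locked. A small sketch, assuming a plain nn.Module handle to the constructed model:
+
+    def report_trainable(model):
+        # Count elements of parameters that will / will not receive gradients.
+        trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
+        frozen = sum(p.numel() for p in model.parameters() if not p.requires_grad)
+        print(f"trainable params: {trainable / 1e6:.1f}M, frozen params: {frozen / 1e6:.1f}M")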
+(the per-layer freeze messages continue in the same pattern and with the same shapes as above, covering decoder layers up through layer 22, again interleaved and duplicated across ranks)
+=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + + + + +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + + + + + +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False. 
+ + + + + + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + + + + +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
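The "=> set param ... requires grad to False" messages above record the InternViT backbone being frozen before this finetuning stage; every rank prints the same messages, so they repeat throughout the log. Below is a minimal sketch of what such a freezing step typically looks like in PyTorch. It assumes an ordinary nn.Module; the helper name freeze_module and the commented usage line are illustrative, not the actual ModelLink/Cognitron-VL code.

    from torch import nn

    def freeze_module(module: nn.Module, log: bool = True) -> None:
        # Disable gradients for every parameter of the module, mirroring the
        # "requires grad to False" messages emitted above for the ViT backbone.
        for name, param in module.named_parameters():
            param.requires_grad = False
            if log:
                print(f"=> set param {name} {param.shape} requires grad to False.")

    # Hypothetical usage: freeze only the vision encoder, leaving the language
    # model and vision projection trainable.
    # freeze_module(model.external_feature_model.vit)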
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
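Given the GPTVLModel printed above (a frozen 24-layer InternViT encoder, an MLP vision projection behind pre_proj_layernorm, and a 48-layer RMSNorm/rotary decoder), a quick sanity check is to tally trainable versus frozen parameters per top-level sub-module. The sketch below assumes a standard PyTorch module and uses only named_parameters(); the helper count_params_by_prefix is illustrative and not part of the training scripts.

    from collections import defaultdict
    from torch import nn

    def count_params_by_prefix(model: nn.Module, depth: int = 2) -> None:
        # Group parameter counts by the leading sub-module path, split by
        # requires_grad, so frozen (vit) vs. trainable (decoder, projection)
        # parts of the printed model are easy to confirm.
        stats = defaultdict(lambda: {"trainable": 0, "frozen": 0})
        for name, param in model.named_parameters():
            prefix = ".".join(name.split(".")[:depth])
            key = "trainable" if param.requires_grad else "frozen"
            stats[prefix][key] += param.numel()
        for prefix, counts in sorted(stats.items()):
            print(f"{prefix}: trainable={counts['trainable']:,} frozen={counts['frozen']:,}")

    # Hypothetical usage right after the model is built and frozen:
    # count_params_by_prefix(model)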
+=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + + + + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+ + + + + +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. 
+ +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+ + + + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+ + + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. 
+ + +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. 
+ + + + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. + + + + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + + + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.{13..23}.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.{13..23}.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+ +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. + + +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
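The "=> set param ... requires grad to False." lines above and below are emitted while the InternViT vision tower is frozen (vision_model_freeze) before fine-tuning; every rank prints its own copy, which is why the messages appear duplicated and interleaved in this multi-process log. A minimal Python sketch of such a freeze step is shown here for reference only; the function name freeze_vision_model and its prefix argument are illustrative assumptions, not the exact ModelLink/Cognitron-VL implementation.

import torch

def freeze_vision_model(model: torch.nn.Module, prefix: str = "external_feature_model.vit") -> None:
    # Assumed sketch: walk all named parameters, freeze those belonging to the vision tower,
    # and print the same "=> set param ..." message format that appears in this log.
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = False
            print(f"=> set param {name} {param.shape} requires grad to False.")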
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
+vision_model_freeze
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. 
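The "=> set param ... requires grad to False." records above show the InternViT vision tower being frozen for this finetuning stage: every ViT parameter (layer scales ls1/ls2, layer norms, attention and MLP weights across all 24 decoder layers) has requires_grad switched off. Below is a minimal sketch of a freeze helper that would emit records in this exact format; the function name and the module-name filter are illustrative assumptions, not the actual implementation used by the training script.

    import torch

    def freeze_vision_tower(model: torch.nn.Module,
                            prefix: str = "external_feature_model.vit") -> None:
        # Hypothetical helper: walk all parameters of the combined
        # vision-language model and freeze those under the ViT prefix,
        # logging each one in the same format as the records above.
        for name, param in model.named_parameters():
            if name.startswith(prefix):
                param.requires_grad = False
                print(f"=> set param {name} {param.shape} requires grad to False.")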
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
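The module tree above shows the GPTVLModel being trained: a 24-layer InternViT vision tower (hidden size 1024, 14x14 patch embedding over 1025 position embeddings), an MLP vision projector behind a 4096-wide pre_proj_layernorm, and a 48-layer language decoder with RMSNorm and ColumnParallel/RowParallel linears. The frozen-parameter shapes logged around it are per-rank shards: linear_qkv of torch.Size([384, 1024]) and linear_proj of torch.Size([1024, 128]) are consistent with the fused QKV (3 x 1024 rows) and the attention output projection (1024 input columns) being split eight ways by tensor parallelism, so any parameter counts read off these records are per rank, not global. A small, generic PyTorch sketch for sanity-checking which top-level submodules ended up frozen versus trainable (not part of the script's own reporting) is:

    from collections import defaultdict

    import torch

    def trainable_report(model: torch.nn.Module) -> None:
        # Tally parameter counts per top-level child of the model
        # (external_feature_model, embedding, decoder, output_layer, ...),
        # split by requires_grad, so a freeze like the one logged here
        # can be verified at a glance.
        counts = defaultdict(lambda: [0, 0])  # name -> [trainable, frozen]
        for name, param in model.named_parameters():
            top = name.split(".")[0]
            counts[top][0 if param.requires_grad else 1] += param.numel()
        for top, (trainable, frozen) in counts.items():
            print(f"{top}: trainable={trainable:,} frozen={frozen:,}")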
+vision_model_freeze
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.ls1 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.2.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.2.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.3.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.4.ls1 torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.4.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.4.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.5.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.6.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.7.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.8.ls2 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.8.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.9.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.10.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.10.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.11.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.11.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.12.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.12.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.ls1 torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.13.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.ls1 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.ls2 torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. + +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.14.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.15.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +vision_model_freeze +=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False. +=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False. +=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.weight torch.Size([1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.1.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.1.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+[... identical "requires grad to False." messages for every parameter of ViT decoder layers 2-23 (ls1, ls2, input_layernorm, self_attention.linear_proj, self_attention.linear_qkv, pre_mlp_layernorm, mlp.linear_fc1, mlp.linear_fc2), with the same shapes as layer 1 ...]
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
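The printout above is the per-rank view of the model: a 24-layer InternViT tower (hidden size 1024) feeding an MLP vision projector, plus a 48-layer RMSNorm/rotary language decoder. The linear shapes in the freeze log ([384, 1024] for linear_qkv, [1024, 128] for linear_proj, [512, 1024] for mlp.linear_fc1) only make sense as tensor-parallel shards; below is a minimal sanity check of that reading, assuming a tensor-model-parallel size of 8 (taken from the run setup, not from this printout).

```python
# Minimal sanity check of the shard shapes in the freeze log above.
# Assumption: tensor-model-parallel size 8; ColumnParallelLinear splits output rows,
# RowParallelLinear splits input columns.
tp = 8
vit_hidden = 1024                  # from the LayerNorm / ls1 / ls2 shapes

qkv_rows_per_rank = 384            # linear_qkv.weight is [384, 1024] per rank
proj_cols_per_rank = 128           # linear_proj.weight is [1024, 128] per rank
fc1_rows_per_rank = 512            # mlp.linear_fc1.weight is [512, 1024] per rank

assert qkv_rows_per_rank * tp == 3 * vit_hidden    # 3072 = fused Q/K/V output dim
assert proj_cols_per_rank * tp == vit_hidden       # 1024 = attention output projection input dim
assert fc1_rows_per_rank * tp == 4 * vit_hidden    # 4096 = ViT MLP hidden size (ratio 4)
print("shard shapes consistent with vit_hidden=1024, tp=8")
```

Multiplying each shard dimension by the assumed tensor-parallel size recovers the usual full-model sizes (3072 for the fused QKV projection, 4096 for the ViT MLP hidden dimension).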
+vision_model_freeze
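The vision_model_freeze step simply walks the ViT's parameters and turns off their gradients, which is what emits the long run of "=> set param ... requires grad to False." lines that follows. A rough sketch of such a helper (hypothetical freeze_module; the project's actual implementation may differ):

```python
import torch

def freeze_module(module: torch.nn.Module, prefix: str = "external_feature_model.vit") -> None:
    # Hypothetical illustration of where the "requires grad to False." lines come from:
    # iterate over every parameter of the vision tower and disable its gradient.
    for name, param in module.named_parameters():
        param.requires_grad = False
        print(f"=> set param {prefix}.{name} {param.shape} requires grad to False.")

# e.g. freeze_module(model.external_feature_model.vit)  # `model` being the GPTVLModel above
```

Every ViT parameter listed below (class token, patch conv, position embeddings, and all 24 decoder layers) is handled the same way.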
+=> set param external_feature_model.vit.class_token torch.Size([1, 1, 1024]) requires grad to False.
+=> set param external_feature_model.vit.conv1.weight torch.Size([1024, 3, 14, 14]) requires grad to False.
+=> set param external_feature_model.vit.conv1.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.position_embeddings.weight torch.Size([1025, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.0.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+[... identical "requires grad to False." messages for every parameter of ViT decoder layers 1-15, with the same shapes as layer 0 ...]
+=> set param external_feature_model.vit.decoder.layers.16.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.16.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.16.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.17.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.18.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.18.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.19.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. 
+=> set param external_feature_model.vit.decoder.layers.20.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.20.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls1 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.ls2 torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.input_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False. +=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc1.bias torch.Size([512]) requires grad to False. 
+model GPTVLModel(
+  (external_feature_model): MegatronVisionModel(
+    (vit): InternViTModel(
+      (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14))
+      (position_embeddings): Embedding(1025, 1024)
+      (decoder): TransformerBlock(
+        (layers): ModuleList(
+          (0-23): 24 x InternViTTransformerLayer(
+            (input_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (self_attention): SelfAttention(
+              (core_attention): DotProductAttention(
+                (scale_mask_softmax): FusedScaleMaskSoftmax()
+                (attention_dropout): Dropout(p=0.0, inplace=False)
+              )
+              (linear_proj): RowParallelLinear()
+              (linear_qkv): ColumnParallelLinear()
+            )
+            (self_attn_bda): IdentityFuncOp()
+            (pre_cross_attn_layernorm): IdentityOp()
+            (cross_attention): IdentityOp()
+            (cross_attn_bda): IdentityFuncOp()
+            (pre_mlp_layernorm): LayerNorm((1024,), eps=1e-06, elementwise_affine=True)
+            (mlp): MLP(
+              (linear_fc1): ColumnParallelLinear()
+              (linear_fc2): RowParallelLinear()
+            )
+            (mlp_bda): IdentityFuncOp()
+          )
+        )
+      )
+    )
+    (vision_projection): MultimodalProjector(
+      (encoder): MLP(
+        (linear_fc1): ColumnParallelLinear()
+        (linear_fc2): RowParallelLinear()
+      )
+    )
+    (pre_proj_layernorm): LayerNorm((4096,), eps=1e-05, elementwise_affine=True)
+  )
+  (embedding): LanguageModelEmbedding(
+    (word_embeddings): VocabParallelEmbedding()
+    (embedding_dropout): Dropout(p=0.0, inplace=False)
+  )
+  (rotary_pos_emb): RotaryEmbedding()
+  (decoder): TransformerBlock(
+    (layers): ModuleList(
+      (0-47): 48 x TransformerLayer(
+        (input_layernorm): RMSNorm()
+        (self_attention): SelfAttention(
+          (core_attention): DotProductAttention(
+            (scale_mask_softmax): FusedScaleMaskSoftmax()
+            (attention_dropout): Dropout(p=0.0, inplace=False)
+          )
+          (linear_proj): RowParallelLinear()
+          (linear_qkv): ColumnParallelLinear()
+          (q_layernorm): IdentityOp()
+          (k_layernorm): IdentityOp()
+        )
+        (pre_cross_attn_layernorm): IdentityOp()
+        (cross_attention): IdentityOp()
+        (cross_attn_bda): IdentityFuncOp()
+        (pre_mlp_layernorm): RMSNorm()
+        (mlp): MLP(
+          (linear_fc1): ColumnParallelLinear()
+          (linear_fc2): RowParallelLinear()
+        )
+      )
+    )
+    (final_layernorm): RMSNorm()
+  )
+  (output_layer): ColumnParallelLinear()
+)
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.21.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.22.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.ls1 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.ls2 torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.input_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.weight torch.Size([1024, 128]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_proj.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.weight torch.Size([384, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.self_attention.linear_qkv.bias torch.Size([384]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.weight torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.pre_mlp_layernorm.bias torch.Size([1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.weight torch.Size([512, 1024]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc1.bias torch.Size([512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.weight torch.Size([1024, 512]) requires grad to False.
+=> set param external_feature_model.vit.decoder.layers.23.mlp.linear_fc2.bias torch.Size([1024]) requires grad to False.
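The long run of "=> set param ... requires grad to False." messages above shows the entire InternViT image encoder being frozen for this stage, while the vision projector and the Qwen2.5-14B decoder remain trainable. A minimal sketch of that mechanism follows (not the actual ModelLink/Cognitron-VL code; the class and helper names are invented, and the shapes are the per-tensor-parallel-rank shapes seen in the log, e.g. the QKV projection reduced to 384 rows on each of the 8 TP ranks):

import torch
import torch.nn as nn


class TinyInternViTLayer(nn.Module):
    """Toy stand-in for one InternViTTransformerLayer, using the per-rank shapes from the log."""

    def __init__(self, hidden=1024, ffn=512, qkv_rows_per_rank=384, head_dim_per_rank=128):
        super().__init__()
        self.ls1 = nn.Parameter(torch.ones(hidden))          # layer-scale vectors, [1024]
        self.ls2 = nn.Parameter(torch.ones(hidden))
        self.input_layernorm = nn.LayerNorm(hidden, eps=1e-6)
        self.self_attention = nn.ModuleDict({
            "linear_proj": nn.Linear(head_dim_per_rank, hidden),   # weight [1024, 128]
            "linear_qkv": nn.Linear(hidden, qkv_rows_per_rank),    # weight [384, 1024]
        })
        self.pre_mlp_layernorm = nn.LayerNorm(hidden, eps=1e-6)
        self.mlp = nn.ModuleDict({
            "linear_fc1": nn.Linear(hidden, ffn),                  # weight [512, 1024]
            "linear_fc2": nn.Linear(ffn, hidden),                  # weight [1024, 512]
        })


def freeze_and_log(module: nn.Module, prefix: str) -> None:
    """Set requires_grad=False on every parameter, logging each one in the format above."""
    for name, param in module.named_parameters():
        param.requires_grad = False
        print(f"=> set param {prefix}.{name} {param.shape} requires grad to False.")


vit_layers = nn.ModuleList(TinyInternViTLayer() for _ in range(24))
freeze_and_log(vit_layers, "external_feature_model.vit.decoder.layers")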
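The "_get_param_groups name ... key (...)" lines that start below are apparently emitted concurrently by all ranks on the node, which is why they interleave and occasionally split mid-line. The visible pattern is that matrix weights get the key (1.0, 1.0, False, False) while biases and layernorm-style vectors get (0.0, 1.0, False, False); reading the first two fields as weight-decay and learning-rate multipliers is an assumption here, and the trailing booleans are left uninterpreted. A rough sketch of that kind of optimizer-group bucketing, under those assumptions and with invented helper names (not the real Megatron/ModelLink implementation):

import torch
import torch.nn as nn


def get_param_groups_sketch(model: nn.Module, base_lr=1e-5, base_wd=0.01):
    """Bucket trainable parameters by (wd_mult, lr_mult), mirroring the printed pattern."""
    buckets = {}
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen ViT parameters never reach the optimizer
        wd_mult = 0.0 if (name.endswith(".bias") or param.ndim == 1) else 1.0
        lr_mult = 1.0
        print(f"_get_param_groups name {name} key {(wd_mult, lr_mult)}")
        buckets.setdefault((wd_mult, lr_mult), []).append(param)
    return [
        {"params": params, "lr": base_lr * lr_mult, "weight_decay": base_wd * wd_mult}
        for (wd_mult, lr_mult), params in buckets.items()
    ]


# Toy usage with a stand-in trainable block (names illustrative only):
toy_decoder = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
optimizer = torch.optim.AdamW(get_param_groups_sketch(toy_decoder))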
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name
module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key 
(0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name 
module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key 
(1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, 
False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, 
False)
+[_get_param_groups: per-parameter optimizer-group keys, printed concurrently by every rank, so entries interleave in the raw log. Deduplicated, each decoder layer N (N = 0..30 in this stretch) contributes the following entries:]
+_get_param_groups name module.module.decoder.layers.N.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+[Non-decoder parameters appearing in this stretch:]
+_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
+[The first key element is 0.0 for layernorm parameters and biases and 1.0 for all other weights, consistent with weight decay being disabled for norms and biases; the remaining key elements are (1.0, False, False) for every parameter listed here.]
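The `_get_param_groups name ... key (...)` lines above record how the trainer buckets every trainable parameter into optimizer groups. As a rough illustration of that bucketing, here is a minimal sketch assuming a plain PyTorch module; the function name, the two-element key, and the defaults are my own simplification for illustration, not the actual ModelLink/Megatron `_get_param_groups` implementation:

# Illustrative sketch only -- not the ModelLink/Megatron-LM implementation.
# It reproduces the grouping pattern visible in the log above: layernorm
# parameters and biases go into a no-weight-decay bucket (wd_mult 0.0),
# everything else into the decayed bucket (wd_mult 1.0), and each distinct
# (wd_mult, lr_mult) key becomes one optimizer param group.
import torch
import torch.nn as nn

def get_param_groups(model, weight_decay=0.1, lr=1e-5):
    buckets = {}  # (wd_mult, lr_mult) -> list of parameters
    for mod_name, module in model.named_modules():
        for p_name, param in module.named_parameters(recurse=False):
            if not param.requires_grad:
                continue
            full_name = f"{mod_name}.{p_name}" if mod_name else p_name
            no_decay = p_name.endswith("bias") or isinstance(module, nn.LayerNorm)
            wd_mult = 0.0 if no_decay else 1.0
            lr_mult = 1.0
            print(f"_get_param_groups name {full_name} key ({wd_mult}, {lr_mult})")
            buckets.setdefault((wd_mult, lr_mult), []).append(param)
    return [
        {"params": ps, "weight_decay": weight_decay * wd, "lr": lr * lm}
        for (wd, lm), ps in buckets.items()
    ]

if __name__ == "__main__":
    # Tiny stand-in model just to exercise the grouping logic.
    toy = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8), nn.Linear(8, 2))
    groups = get_param_groups(toy)
    optim = torch.optim.AdamW(groups)
    print([len(g["params"]) for g in groups])

Each distinct key becomes one AdamW parameter group, so disabling weight decay for norms and biases needs no per-parameter bookkeeping beyond the key itself.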
+[The same per-layer entries repeat, interleaved across ranks, for the remaining decoder layers.]
+_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key
(0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key 
(1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name 
module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + + + + +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + + + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name 
module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, 
False) + +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight 
+_get_param_groups: one "name <param> key <tuple>" line is printed per parameter, interleaved across ranks; deduplicated, the entries reduce to two key groups:
+_get_param_groups key (1.0, 1.0, False, False): module.module.decoder.layers.{0..47}.self_attention.linear_qkv.weight, .self_attention.linear_proj.weight, .mlp.linear_fc1.weight, .mlp.linear_fc2.weight; module.module.embedding.word_embeddings.weight; module.module.output_layer.weight; module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight, .linear_fc2.weight
+_get_param_groups key (0.0, 1.0, False, False): module.module.decoder.layers.{0..47}.input_layernorm.weight, .pre_mlp_layernorm.weight, .self_attention.linear_qkv.bias; module.module.decoder.final_layernorm.weight; module.module.external_feature_model.pre_proj_layernorm.weight, .bias
False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name 
module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + + +_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name 
module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + 
+_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, 
False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + + + +_get_param_groups name module.module.decoder.layers.0.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, 
False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name 
module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + + + + + + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups 
+[_get_param_groups: parameter-group key for every model parameter, printed by multiple ranks; one line per unique name/key pattern]
+_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..47}.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.0.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.0.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, 
False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.1.mlp.linear_fc2.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.2.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.2.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, 
False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.3.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.3.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.4.input_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, 
False, False) + +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.4.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 
1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.5.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.6.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.6.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) + + + + + +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, 
False)_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + + +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) + + + + +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias 
+_get_param_groups name/key output (emitted once per local rank and interleaved in the raw log; the distinct parameter patterns are listed below, with N the decoder layer index):
+_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.N.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
False)_get_param_groups name module.module.decoder.layers.7.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.7.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.7.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) 
+ + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.8.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.9.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name 
module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.9.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.10.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.10.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.11.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + + 
+_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.11.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.12.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.12.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.13.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, 
False) +_get_param_groups name module.module.decoder.layers.14.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.14.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.14.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.15.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.15.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.15.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, 
False, False) + +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.16.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.17.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.17.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.18.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.18.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.18.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name 
module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.19.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.19.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups 
name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + + +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key 
(1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{29..47}.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{29..47}.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{29..47}.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{29..47}.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{29..47}.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{29..47}.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{29..47}.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..9}.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..9}.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..9}.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..9}.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..9}.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..9}.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..9}.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
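Each `_get_param_groups` record above logs one model parameter together with the key tuple used to bucket it into an optimizer parameter group. In Megatron-style parameter grouping this tuple is usually (wd_mult, lr_mult, is_expert_parallel, is_decoupled_lr), which is consistent with the pattern in this log: layernorm weights, the pre_proj_layernorm bias and the QKV biases get wd_mult 0.0 (no weight decay), every other weight gets 1.0, and lr_mult stays 1.0 throughout. The sketch below is plain PyTorch, not the ModelLink implementation; the function name get_param_groups and its return layout are illustrative only.

from collections import defaultdict

import torch


def get_param_groups(model: torch.nn.Module, lr_mult: float = 1.0):
    """Illustrative sketch: bucket parameters by (wd_mult, lr_mult, ...) keys."""
    groups = defaultdict(list)
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # Biases and 1-D norm weights are exempted from weight decay,
        # matching the (0.0, 1.0, False, False) keys seen in the log.
        no_wd = name.endswith(".bias") or param.ndim == 1
        wd_mult = 0.0 if no_wd else 1.0
        # The last two flags stand in for expert-parallel / decoupled-lr
        # handling, which this sketch does not implement.
        key = (wd_mult, lr_mult, False, False)
        print(f"_get_param_groups name {name} key {key}")
        groups[key].append(param)
    return [
        {"params": params, "wd_mult": key[0], "lr_mult": key[1]}
        for key, params in groups.items()
    ]

An optimizer built from such groups then scales its base weight decay and learning rate by wd_mult and lr_mult once per group, rather than configuring every parameter individually.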
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.vision_projection.encoder.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.external_feature_model.pre_proj_layernorm.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.embedding.word_embeddings.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..33}.input_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..33}.self_attention.linear_proj.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..33}.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..33}.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..33}.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..33}.mlp.linear_fc1.weight key (1.0, 1.0, False, False)
+_get_param_groups name module.module.decoder.layers.{0..32}.mlp.linear_fc2.weight key (1.0, 1.0, False, False)
+_get_param_groups name 
module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.20.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.21.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc1.weight key (1.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.22.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.23.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.23.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.23.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.24.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.24.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.24.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name 
module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.25.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.25.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.26.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.26.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.self_attention.linear_proj.weight key (1.0, 1.0, 
False, False) + +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.27.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.27.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.28.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.28.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.29.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name 
module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.29.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.29.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.30.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.30.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.31.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc1.weight key (1.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.31.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.32.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc1.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.32.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.input_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_proj.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.33.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.input_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.33.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.47.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
module.module.decoder.layers.47.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.self_attention.linear_qkv.bias key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.self_attention.linear_proj.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.47.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.34.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.final_layernorm.weight key (0.0, 1.0, False, False)_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc1.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.34.mlp.linear_fc2.weight key (1.0, 1.0, False, False)_get_param_groups name module.module.output_layer.weight key (1.0, 1.0, False, False) + +_get_param_groups name module.module.decoder.layers.35.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.35.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.36.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) 
+_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.37.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.38.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.39.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.40.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.41.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_proj.weight 
key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.42.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.43.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.44.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc1.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.45.mlp.linear_fc2.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.input_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_proj.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.weight key (1.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.self_attention.linear_qkv.bias key (0.0, 1.0, False, False) +_get_param_groups name module.module.decoder.layers.46.pre_mlp_layernorm.weight key (0.0, 1.0, False, False) +_get_param_groups name 
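[Editor's note: the key tuples logged above look like the usual no-weight-decay split - biases and layernorm weights are keyed 0.0, all other weights 1.0 (reading the first tuple element as a weight-decay multiplier and the second as a learning-rate multiplier is an assumption). A minimal sketch of that grouping in plain PyTorch; the helper and toy module below are illustrative only, not the actual ModelLink/Megatron code.]

# Sketch: group parameters by a (wd_mult, lr_mult) key, as the _get_param_groups
# lines above suggest. Name patterns mirror the log; everything here is hypothetical.
from collections import defaultdict

import torch
import torch.nn as nn


class ToyBlock(nn.Module):
    def __init__(self, hidden: int = 8):
        super().__init__()
        self.input_layernorm = nn.LayerNorm(hidden)
        self.linear_fc1 = nn.Linear(hidden, hidden)


def get_param_groups(model: nn.Module, lr: float = 1e-5, weight_decay: float = 0.1):
    groups = defaultdict(list)
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # biases and *layernorm* params -> keyed (0.0, 1.0); everything else (1.0, 1.0)
        no_wd = name.endswith(".bias") or "layernorm" in name
        key = (0.0 if no_wd else 1.0, 1.0)
        groups[key].append(param)
    return [
        {"params": params, "weight_decay": weight_decay * wd_mult, "lr": lr * lr_mult}
        for (wd_mult, lr_mult), params in groups.items()
    ]


if __name__ == "__main__":
    optimizer = torch.optim.AdamW(get_param_groups(ToyBlock()))
    for group in optimizer.param_groups:
        print(len(group["params"]), "params, weight_decay =", group["weight_decay"])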
+_load_base_checkpoint iteration 1000
+_load_base_checkpoint release False
[... the two lines above are printed once per local rank; the shard path below is likewise printed once per rank. De-interleaved and de-duplicated, the eight unique shard paths are: ...]
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_00/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_01/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_02/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_03/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_04/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_05/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_06/model_optim_rng.pt
+_load_base_checkpoint /data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/iter_0001000/mp_rank_07/model_optim_rng.pt
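[Editor's note: the shard paths above follow the layout <load_dir>/iter_<7-digit iteration>/mp_rank_<2-digit tensor-parallel rank>/model_optim_rng.pt. A small sketch of that naming convention; get_checkpoint_name below is a simplified stand-in, not the actual Megatron/ModelLink function, and ignores pipeline/expert ranks.]

# Sketch of the checkpoint layout visible in the paths above (hypothetical helper).
import os


def get_checkpoint_name(load_dir: str, iteration: int, tp_rank: int, release: bool = False) -> str:
    directory = "release" if release else f"iter_{iteration:07d}"
    return os.path.join(load_dir, directory, f"mp_rank_{tp_rank:02d}", "model_optim_rng.pt")


if __name__ == "__main__":
    base = "/data_2/output/LM/scripts/modellink/qwen25/finetune_qwen25_14b_intern_300m_ptd_tp8pp1cp2_stage3.sh/20241127_204213/"
    for tp_rank in range(8):
        print(get_checkpoint_name(base, 1000, tp_rank))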
+load_checkpoint iteration 0
+load_checkpoint release False
+strict True
[... the load_checkpoint iteration 0 / release False / strict True triplet is printed once by each of the 16 local ranks; the repeats are omitted here ...]
+> rank 223 does not create GPT datasets ...
+> rank 215 does not create GPT datasets ...
+> rank 216 is creating GPT datasets ...
+> rank 221 does not create GPT datasets ...
+> rank 208 is creating GPT datasets ...
+> rank 212 does not create GPT datasets ...
+> rank 222 does not create GPT datasets ...
+> rank 210 does not create GPT datasets ...
+> rank 217 does not create GPT datasets ...
+> rank 211 does not create GPT datasets ...
+> rank 219 does not create GPT datasets ...
+> rank 209 does not create GPT datasets ...
+> rank 214 does not create GPT datasets ...
+> rank 220 does not create GPT datasets ...
+> rank 218 does not create GPT datasets ...
+> rank 213 does not create GPT datasets ...
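[Editor's note: with tensor-parallel size 8, only the first rank of each tensor-parallel group (global ranks 208 and 216 on this node) reports "is creating GPT datasets"; the remaining ranks reuse the result. A minimal sketch of that kind of gating, using a hypothetical predicate rather than the exact Megatron-LM/ModelLink one.]

# Illustrative gating: with TP size 8, only global ranks whose tp-rank is 0 build
# the datasets. Hypothetical sketch, not the real predicate.
TP_SIZE = 8


def is_dataset_built_on_rank(global_rank: int, tp_size: int = TP_SIZE) -> bool:
    return global_rank % tp_size == 0


if __name__ == "__main__":
    for rank in range(208, 224):
        verb = "is creating" if is_dataset_built_on_rank(rank) else "does not create"
        print(f"> rank {rank} {verb} GPT datasets ...")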
+target_ratios [(1, 1), (1, 2), (2, 1), (3, 1), (1, 3), (2, 2), (4, 1), (1, 4), (5, 1), (1, 5), (1, 6), (6, 1), (3, 2), (2, 3), (7, 1), (1, 7), (4, 2), (2, 4), (1, 8), (8, 1), (1, 9), (3, 3), (9, 1), (2, 5), (5, 2), (10, 1), (1, 10), (11, 1), (1, 11), (12, 1), (3, 4), (4, 3), (1, 12), (6, 2), (2, 6)]
+possible_resolutions [[448, 448], [448, 896], [896, 448], [1344, 448], [448, 1344], [896, 896], [1792, 448], [448, 1792], [2240, 448], [448, 2240], [448, 2688], [2688, 448], [1344, 896], [896, 1344], [3136, 448], [448, 3136], [1792, 896], [896, 1792], [448, 3584], [3584, 448], [448, 4032], [1344, 1344], [4032, 448], [896, 2240], [2240, 896], [4480, 448], [448, 4480], [4928, 448], [448, 4928], [5376, 448], [1344, 1792], [1792, 1344], [448, 5376], [2688, 896], [896, 2688]]
[... target_ratios / possible_resolutions are printed twice (once per data-loader worker); the duplicate is omitted here ...]
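[Editor's note: the possible_resolutions list above is exactly the target_ratios list scaled by what appears to be a 448-pixel tile size used for dynamic image tiling. A short check of that arithmetic; the ratio list is taken verbatim from the log, the variable names are illustrative.]

# Each aspect ratio (w, h) maps to a candidate resolution [w * 448, h * 448].
TILE = 448

target_ratios = [(1, 1), (1, 2), (2, 1), (3, 1), (1, 3), (2, 2), (4, 1), (1, 4),
                 (5, 1), (1, 5), (1, 6), (6, 1), (3, 2), (2, 3), (7, 1), (1, 7),
                 (4, 2), (2, 4), (1, 8), (8, 1), (1, 9), (3, 3), (9, 1), (2, 5),
                 (5, 2), (10, 1), (1, 10), (11, 1), (1, 11), (12, 1), (3, 4),
                 (4, 3), (1, 12), (6, 2), (2, 6)]

possible_resolutions = [[w * TILE, h * TILE] for (w, h) in target_ratios]
print(possible_resolutions[:6])  # [[448, 448], [448, 896], [896, 448], [1344, 448], [448, 1344], [896, 896]]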
[... a long run of ffmpeg h264 decoder warnings of the form "[h264 @ 0x...] mmco: unref short failure", emitted while decoding video samples, is elided here ...]
+processed_samples 100 unjoint_samples 100 joint_samples 0 [162062, 161249]
+processed_samples 100 unjoint_samples 100 joint_samples 0 [138371, 137900]
+processed_samples 100 unjoint_samples 100 joint_samples 0 [179830, 156142]
+processed_samples 100 unjoint_samples 100 joint_samples 0 [164252, 167667]
+processed_samples 100 unjoint_samples 100 joint_samples 0 [161592, 159740]
+processed_samples 100 unjoint_samples 100 joint_samples 0 [179265, 142987]
+processed_samples 100 unjoint_samples 100 joint_samples 0 [169456, 167170]
+processed_samples 100 unjoint_samples 100 joint_samples 0 [183251, 185489]
[... each processed_samples line is printed twice in the raw log; the duplicates and the surrounding "[h264 @ 0x...] mmco: unref short failure" decoder warnings are elided ...]
+processed_samples 200 unjoint_samples 200 joint_samples 0 [297211, 295395]
+processed_samples 200 unjoint_samples 200 joint_samples 0 [299007, 301566]
+processed_samples 200 unjoint_samples 200 joint_samples 0 [372899, 377065]
+processed_samples 200 unjoint_samples 200 joint_samples 0 [257095, 258790]
+processed_samples 200 unjoint_samples 200 joint_samples 0 [321263, 321123]
+processed_samples 200 unjoint_samples 200 joint_samples 0 [324501, 322883]
322883] +processed_samples 200 unjoint_samples 200 joint_samples 0 [324501, 322883] +processed_samples 200 unjoint_samples 200 joint_samples 0 [323134, 322658] +processed_samples 200 unjoint_samples 200 joint_samples 0 [323134, 322658] +processed_samples 200 unjoint_samples 200 joint_samples 0 [409816, 420401] +processed_samples 200 unjoint_samples 200 joint_samples 0 [409816, 420401] +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed3e54b40] mmco: unref short failure +[h264 @ 0x55eed3e54b40] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed3e54b40] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure 
+[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x5638747c4140] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed3a68300] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +processed_samples 300 unjoint_samples 300 joint_samples 0 [432217, 435634] +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +processed_samples 300 unjoint_samples 300 joint_samples 0 [432217, 435634] +processed_samples 300 unjoint_samples 300 joint_samples 0 [437736, 438973] +processed_samples 300 unjoint_samples 300 joint_samples 0 [396387, 398531] +processed_samples 300 unjoint_samples 300 joint_samples 0 [437736, 438973] +processed_samples 300 unjoint_samples 300 joint_samples 0 [396387, 398531] +processed_samples 300 unjoint_samples 300 joint_samples 0 [539031, 539227] +processed_samples 300 unjoint_samples 300 joint_samples 0 [539031, 539227] +processed_samples 300 unjoint_samples 300 joint_samples 0 [441366, 442859] +processed_samples 300 unjoint_samples 300 joint_samples 0 [441366, 442859] +processed_samples 300 unjoint_samples 300 joint_samples 0 [463496, 463655] +processed_samples 300 unjoint_samples 300 joint_samples 0 [463496, 463655] +processed_samples 300 unjoint_samples 300 joint_samples 0 [562854, 561876] +processed_samples 300 unjoint_samples 300 joint_samples 0 [562854, 561876] +processed_samples 300 unjoint_samples 300 joint_samples 0 [531941, 529081] +processed_samples 300 unjoint_samples 300 joint_samples 0 [531941, 529081] +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short 
failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875c25a00] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] 
mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +processed_samples 400 unjoint_samples 400 joint_samples 0 [586201, 592183] +processed_samples 400 unjoint_samples 400 joint_samples 0 [586201, 592183] +processed_samples 400 unjoint_samples 400 joint_samples 0 [576707, 578364] +processed_samples 400 unjoint_samples 400 joint_samples 0 [575243, 577322] +processed_samples 400 unjoint_samples 400 joint_samples 0 [576707, 578364] +processed_samples 400 unjoint_samples 400 joint_samples 0 [575243, 577322] +processed_samples 400 unjoint_samples 400 joint_samples 0 [718416, 785789] +processed_samples 400 unjoint_samples 400 joint_samples 0 [767428, 766502] +processed_samples 400 unjoint_samples 400 joint_samples 0 [706574, 707248] +processed_samples 400 unjoint_samples 400 joint_samples 0 [767428, 766502] +processed_samples 400 unjoint_samples 400 joint_samples 0 [718416, 785789] +processed_samples 400 unjoint_samples 400 joint_samples 0 [706574, 707248] +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +processed_samples 400 unjoint_samples 400 joint_samples 0 [594399, 617095] +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +processed_samples 400 unjoint_samples 400 joint_samples 0 [594399, 617095] +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +processed_samples 400 unjoint_samples 400 joint_samples 0 [547548, 548589] +processed_samples 400 unjoint_samples 400 joint_samples 0 [547548, 548589] +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 
0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +processed_samples 500 unjoint_samples 500 joint_samples 0 [713733, 712561] +processed_samples 500 unjoint_samples 500 joint_samples 0 [734671, 733133] +processed_samples 500 unjoint_samples 500 joint_samples 0 [713733, 712561] +processed_samples 500 unjoint_samples 500 joint_samples 0 [734671, 733133] +processed_samples 500 unjoint_samples 500 joint_samples 0 [750430, 748411] +processed_samples 500 unjoint_samples 500 joint_samples 0 [864803, 799139] +processed_samples 500 unjoint_samples 500 joint_samples 0 [864803, 799139] +processed_samples 500 unjoint_samples 500 joint_samples 0 [750430, 748411] +processed_samples 500 unjoint_samples 500 joint_samples 0 [720854, 718670] +processed_samples 500 unjoint_samples 500 joint_samples 0 [720854, 718670] +processed_samples 500 unjoint_samples 500 joint_samples 0 [924705, 928348] +processed_samples 500 unjoint_samples 500 joint_samples 0 [924705, 928348] +processed_samples 500 unjoint_samples 500 joint_samples 0 [858892, 860165] +processed_samples 500 unjoint_samples 500 joint_samples 0 [858892, 860165] +processed_samples 500 unjoint_samples 500 joint_samples 0 [884168, 878997] +processed_samples 500 unjoint_samples 500 joint_samples 0 [884168, 878997] +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure 
+[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x5638757f3980] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed3e54b40] mmco: unref short failure +[h264 @ 0x55eed3e54b40] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x563877070480] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +................................................................................................processed_samples 600 unjoint_samples 600 joint_samples 1 [1041344, 40606] +processed_samples 600 unjoint_samples 600 joint_samples 1 [1041344, 40606] +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +processed_samples 600 unjoint_samples 600 joint_samples 0 [1009826, 1007146] +processed_samples 600 unjoint_samples 600 joint_samples 0 [1009826, 1007146] +processed_samples 600 unjoint_samples 600 joint_samples 0 [882918, 876515] +processed_samples 600 unjoint_samples 600 joint_samples 0 [882918, 876515] +processed_samples 600 unjoint_samples 600 joint_samples 0 [916253, 917126] +processed_samples 600 unjoint_samples 600 joint_samples 0 [916253, 917126] +processed_samples 600 unjoint_samples 600 joint_samples 0 [932613, 937020] +processed_samples 600 unjoint_samples 600 joint_samples 0 [932613, 
937020] +processed_samples 600 unjoint_samples 600 joint_samples 0 [987935, 998226] +processed_samples 600 unjoint_samples 600 joint_samples 0 [987935, 998226] +processed_samples 600 unjoint_samples 600 joint_samples 0 [1027798, 1034793] +processed_samples 600 unjoint_samples 600 joint_samples 0 [1027798, 1034793] +processed_samples 600 unjoint_samples 600 joint_samples 0 [896140, 897576] +processed_samples 600 unjoint_samples 600 joint_samples 0 [896140, 897576] +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eeda7ac640] mmco: unref short failure +[h264 @ 0x55eeda7ac640] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x5638746519c0] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x56387538acc0] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] 
mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +processed_samples 700 unjoint_samples 700 joint_samples 1 [233872, 1017346] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1047134, 120285] +processed_samples 700 unjoint_samples 700 joint_samples 1 [233872, 1017346] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1047134, 120285] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1017035, 54390] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1017035, 54390] +processed_samples 700 unjoint_samples 700 joint_samples 1 [58294, 1034725] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1041344, 327993] +processed_samples 700 unjoint_samples 700 joint_samples 1 [58294, 1034725] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1041344, 327993] +processed_samples 700 unjoint_samples 700 joint_samples 0 [1032847, 1033725] +processed_samples 700 unjoint_samples 700 joint_samples 0 [1032847, 1033725] +processed_samples 700 unjoint_samples 700 joint_samples 1 [287080, 1040706] +processed_samples 700 unjoint_samples 700 joint_samples 1 [287080, 1040706] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1046364, 241459] +processed_samples 700 unjoint_samples 700 joint_samples 1 [1046364, 241459] +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563874b7b400] mmco: unref short failure +[h264 @ 0x563874b7b400] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure 
+[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x563874943900] mmco: unref short failure +[h264 @ 0x563874943900] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed43712c0] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +processed_samples 800 unjoint_samples 800 joint_samples 1 [259690, 1047056] +processed_samples 800 unjoint_samples 800 joint_samples 1 [259690, 1047056] +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +processed_samples 800 unjoint_samples 800 joint_samples 1 [521128, 1017346] +processed_samples 800 unjoint_samples 800 joint_samples 1 [521128, 1017346] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1046364, 671607] 
+processed_samples 800 unjoint_samples 800 joint_samples 1 [1046364, 671607] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1017035, 391907] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1017035, 391907] +processed_samples 800 unjoint_samples 800 joint_samples 1 [348852, 1034725] +processed_samples 800 unjoint_samples 800 joint_samples 1 [348852, 1034725] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1041344, 614568] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1041344, 614568] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1047134, 521962] +processed_samples 800 unjoint_samples 800 joint_samples 1 [596024, 1040706] +processed_samples 800 unjoint_samples 800 joint_samples 1 [596024, 1040706] +processed_samples 800 unjoint_samples 800 joint_samples 1 [1047134, 521962] +[h264 @ 0x55eed3a68300] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x55eed38d9400] mmco: unref short failure +[h264 @ 0x55eed38d9400] mmco: unref short failure +[h264 @ 0x563874b7b400] mmco: unref short failure 
+[h264 @ 0x563874b7b400] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x5638757cae80] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +processed_samples 900 unjoint_samples 900 joint_samples 1 [538074, 1047056] +processed_samples 900 unjoint_samples 900 joint_samples 1 [1047134, 804611] +processed_samples 900 unjoint_samples 900 joint_samples 1 [621390, 1034725] +processed_samples 900 unjoint_samples 900 joint_samples 1 [884865, 1017346] +processed_samples 900 unjoint_samples 900 joint_samples 1 [538074, 1047056] +processed_samples 900 unjoint_samples 900 joint_samples 1 [1047134, 804611] +processed_samples 900 unjoint_samples 900 joint_samples 1 [884865, 1017346] +processed_samples 900 unjoint_samples 900 joint_samples 1 [1041344, 941193] +processed_samples 900 unjoint_samples 900 joint_samples 1 [621390, 1034725] +processed_samples 900 unjoint_samples 900 joint_samples 1 [1046364, 983917] +processed_samples 900 unjoint_samples 900 joint_samples 1 [1017035, 659398] +processed_samples 900 unjoint_samples 900 joint_samples 1 [951508, 1040706] +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +processed_samples 900 unjoint_samples 900 joint_samples 1 [1046364, 983917] +processed_samples 900 unjoint_samples 900 joint_samples 1 [1017035, 659398] +processed_samples 900 unjoint_samples 900 joint_samples 1 [1041344, 941193] +processed_samples 900 unjoint_samples 900 joint_samples 1 [951508, 1040706] +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x5638751ede00] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x563875e2d840] 
mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563878b2fd80] mmco: unref short failure +[h264 @ 0x563878b2fd80] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [306595, 1036929] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1040366, 111089] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [306595, 1036929] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [107866, 1041397] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1048054, 224766] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1040366, 111089] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1046980, 244016] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [107866, 1041397] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1048054, 224766] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [1046980, 244016] +processed_samples 1000 unjoint_samples 1000 joint_samples 1 [840124, 1047056] +processed_samples 1000 unjoint_samples 1000 joint_samples 1 [840124, 1047056] +processed_samples 1000 unjoint_samples 1000 joint_samples 1 [993307, 1034725] +processed_samples 1000 unjoint_samples 1000 joint_samples 1 [993307, 1034725] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [20272, 1046708] +processed_samples 1000 unjoint_samples 1000 joint_samples 2 [20272, 1046708] +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 
0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x5638751ede00] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed2f56000] mmco: unref short failure +[h264 @ 0x55eed2f56000] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] [h264 @ 0x55eed411f180] mmco: unref short failure +mmco: unref short 
failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1046832, 200918] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1046832, 200918] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1040366, 381461] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [375393, 1041397] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1040366, 381461] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [591629, 1036929] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [375393, 1041397] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1046980, 619169] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [591629, 1036929] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1046980, 619169] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1012896, 192232] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1012896, 192232] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1048054, 583120] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [1048054, 583120] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [284975, 1046708] +processed_samples 1100 unjoint_samples 1100 joint_samples 2 [284975, 1046708] +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed38d9400] mmco: unref short failure +[h264 @ 0x55eed38d9400] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref 
short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x56387689cb40] mmco: unref short failure +[h264 @ 0x56387689cb40] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x56387689cb40] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1012896, 418587] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1012896, 418587] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1046832, 439227] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1048054, 868903] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1040366, 679855] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1046832, 439227] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1048054, 868903] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1040366, 679855] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [967580, 1036929] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1046980, 
940901] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [744851, 1041397] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [1046980, 940901] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [672309, 1046708] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [967580, 1036929] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [744851, 1041397] +processed_samples 1200 unjoint_samples 1200 joint_samples 2 [672309, 1046708] +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563874943900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed787a100] Missing reference picture, default is 65530 +[h264 @ 0x55eed787a100] Missing reference picture, default is 65530 +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x5638786aa400] Missing reference picture, default is 65530 +[h264 @ 0x5638786aa400] Missing reference picture, default is 65530 +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed3a68300] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short 
failure
+[h264 @ 0x55eed7529700] mmco: unref short failure
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [1040366, 955428]
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [193002, 1047338]
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [126499, 1045487]
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [1012896, 652493]
+processed_samples 1300 unjoint_samples 1300 joint_samples 3 [206438, 1045797]
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [1046832, 742329]
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [989655, 1041397]
+processed_samples 1300 unjoint_samples 1300 joint_samples 2 [1008679, 1046708]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [1046881, 23599]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [292035, 1041397]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [200206, 1047367]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [251183, 1046708]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [574908, 1045797]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [407315, 1045487]
+processed_samples 1400 unjoint_samples 1400 joint_samples 3 [423226, 1047338]
+processed_samples 1400 unjoint_samples 1400 joint_samples 2 [1031653, 1039880]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [576564, 1041397]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [262751, 1044123]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [599066, 1047367]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [1046881, 282249]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [668828, 1047338]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [486058, 1046708]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [678421, 1045487]
+processed_samples 1500 unjoint_samples 1500 joint_samples 3 [992560, 1045797]
+[h264 @ 0x563875bb8440] mmco: unref short failure
+[h264 @ 0x563875474080] mmco: unref short failure
+processed_samples 1600 unjoint_samples 1600 joint_samples 3 [939507, 1047367]
+processed_samples 1600 unjoint_samples 1600 joint_samples 4 [1041716, 104966]
+processed_samples 1600 unjoint_samples 1600 joint_samples 4 [354067, 1045797]
+processed_samples 1600 unjoint_samples 1600 joint_samples 3 [551489, 1044123]
+processed_samples 1600 unjoint_samples 1600 joint_samples 3 [1046881, 617475]
+processed_samples 1600 unjoint_samples 1600 joint_samples 3 [870894, 1041397]
+processed_samples 1600 unjoint_samples 1600 joint_samples 3 [900797, 1045487]
+processed_samples 1600 unjoint_samples 1600 joint_samples 3 [751216, 1046708]
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1032876, 252877]
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1023370, 208732]
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [637605, 1045797]
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1046900, 112455]
+processed_samples 1700 unjoint_samples 1700 joint_samples 3 [968541, 1046708]
+processed_samples 1700 unjoint_samples 1700 joint_samples 3 [1046881, 989635]
+processed_samples 1700 unjoint_samples 1700 joint_samples 3 [940378, 1044123]
+processed_samples 1700 unjoint_samples 1700 joint_samples 4 [1041716, 368590]
+[h264 @ 0x55eed4db2680] mmco: unref short failure
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1046881, 221875]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1048251, 238666]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1046900, 389588]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1045991, 451134]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1041716, 707868]
+processed_samples 1800 unjoint_samples 1800 joint_samples 5 [1018511, 55142]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1023370, 582080]
+processed_samples 1800 unjoint_samples 1800 joint_samples 4 [1032876, 602609]
+processed_samples 1900 unjoint_samples 1900 joint_samples 5 [1018511, 369093]
+processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1045991, 694904]
+processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1048251, 517492]
+processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1046881, 607956]
+processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1046900, 726602]
+processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1041716, 1010844]
+processed_samples 1900 unjoint_samples 1900 joint_samples 5 [78509, 977924]
+processed_samples 1900 unjoint_samples 1900 joint_samples 4 [1032876, 919318]
+[h264 @ 0x563875487640] mmco: unref short failure
+processed_samples 2000 unjoint_samples 2000 joint_samples 5 [113657, 1044096]
+processed_samples 2000 unjoint_samples 2000 joint_samples 5 [499000, 977924]
+processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1046583, 221873]
+processed_samples 2000 unjoint_samples 2000 joint_samples 4 [1046900, 1006175]
+processed_samples 2000 unjoint_samples 2000 joint_samples 5 [1018511, 636494]
+processed_samples 2000 unjoint_samples 2000 joint_samples 4 [1048251, 766922]
+processed_samples 2000 unjoint_samples 2000 joint_samples 4 [1045991, 1004147]
+processed_samples 2000 unjoint_samples 2000 joint_samples 4 [1046881, 913348]
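The progress lines above all follow one pattern, processed_samples N unjoint_samples N joint_samples M [a, b], interleaved with repeated h264 decoder warnings. A minimal post-processing sketch (a hypothetical helper, not part of the training scripts; the regexes simply mirror the line formats seen in this log) that tallies both from a saved node log could look like:

#!/usr/bin/env python3
# Hypothetical log summariser: extracts the processed_samples progress lines
# and counts the repeated "[h264 @ 0x...] mmco: unref short failure" warnings.
import re
import sys
from collections import Counter

# Matches lines such as:
#   processed_samples 1300 unjoint_samples 1300 joint_samples 2 [1040366, 955428]
PROGRESS_RE = re.compile(
    r"processed_samples (\d+) unjoint_samples (\d+) joint_samples (\d+) \[(\d+), (\d+)\]"
)
H264_WARN_RE = re.compile(r"\[h264 @ 0x[0-9a-f]+\] mmco: unref short failure")

def summarise(path):
    progress = []          # (processed, unjoint, joint, buf_a, buf_b) tuples
    warnings = Counter()   # decoder context address -> warning count
    with open(path, "r", errors="replace") as fh:
        for line in fh:
            m = PROGRESS_RE.search(line)
            if m:
                progress.append(tuple(int(g) for g in m.groups()))
                continue
            if H264_WARN_RE.search(line):
                # Address is purely informational; warnings are otherwise identical.
                addr = line.split("@", 1)[1].split("]", 1)[0].strip()
                warnings[addr] += 1
    return progress, warnings

if __name__ == "__main__":
    # Usage: python summarise_log.py <path to a node log like this one>
    progress, warnings = summarise(sys.argv[1])
    print(f"progress lines: {len(progress)}, h264 warnings: {sum(warnings.values())}")
    if progress:
        print("last progress entry:", progress[-1])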
+[h264 @ 0x55eed50f6040] mmco: unref short failure
+processed_samples 2100 unjoint_samples 2100 joint_samples 4 [1048251, 996730]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [339480, 1044096]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1046881, 197686]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1046900, 360427]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1046583, 530420]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [823585, 977924]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [247718, 1041573]
+processed_samples 2100 unjoint_samples 2100 joint_samples 5 [1018511, 945024]
+[h264 @ 0x56387530bc40] mmco: unref short failure
+processed_samples 2200 unjoint_samples 2200 joint_samples 6 [1046215, 37604]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1046881, 420344]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1046900, 585603]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [564277, 1041573]
+processed_samples 2200 unjoint_samples 2200 joint_samples 6 [109560, 1048076]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [1046583, 810527]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [189422, 1047711]
+processed_samples 2200 unjoint_samples 2200 joint_samples 5 [726080, 1044096]
+[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x56387477ee00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x5638745efa40] mmco: unref short failure +[h264 @ 0x5638745efa40] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [1046215, 386012] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [1046215, 386012] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [565678, 1048076] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [460775, 1047711] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [565678, 1048076] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [460775, 1047711] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [956499, 1041573] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [956499, 1041573] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [200929, 1035275] +processed_samples 2300 unjoint_samples 2300 joint_samples 6 [200929, 1035275] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1046900, 852862] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1046900, 852862] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1045451, 1046221] +processed_samples 2300 
unjoint_samples 2300 joint_samples 5 [1045451, 1046221] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1046881, 778318] +processed_samples 2300 unjoint_samples 2300 joint_samples 5 [1046881, 778318] +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x56387538acc0] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x56387538acc0] mmco: unref short failure +[h264 @ 0x56387538acc0] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x56387735efc0] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563874b7b400] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563874afb5c0] mmco: unref short failure +[h264 @ 0x55eec819a740] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x56387665bfc0] 
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [18111, 1036156]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [18111, 1036156]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1046215, 715042]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1046215, 715042]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [66578, 1046382]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [66578, 1046382]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1045451, 335542]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [1045451, 335542]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [248050, 1047863]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [248050, 1047863]
+processed_samples 2400 unjoint_samples 2400 joint_samples 5 [712867, 1047711]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [960640, 1048076]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [960640, 1048076]
+processed_samples 2400 unjoint_samples 2400 joint_samples 5 [712867, 1047711]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [576855, 1035275]
+processed_samples 2400 unjoint_samples 2400 joint_samples 6 [576855, 1035275]
+[h264] mmco: unref short failure (repeated many times across multiple decoder instances)
+processed_samples 2500 unjoint_samples 2500 joint_samples 7 [88122, 1046831]
+processed_samples 2500 unjoint_samples 2500 joint_samples 7 [88122, 1046831]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [265435, 1036156]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [265435, 1036156]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1046853, 23318]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1046853, 23318]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1045451, 647951]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [1045451, 647951]
+processed_samples 2500 unjoint_samples 2500 joint_samples 7 [211533, 1048076]
+processed_samples 2500 unjoint_samples 2500 joint_samples 7 [211533, 1048076]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [549159, 1047863]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [549159, 1047863]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [823515, 1035275]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [823515, 1035275]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [418005, 1046382]
+processed_samples 2500 unjoint_samples 2500 joint_samples 6 [418005, 1046382]
+[h264] mmco: unref short failure (repeated many times across multiple decoder instances)
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1046853, 350256]
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1046853, 350256]
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [498601, 1036156]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [407000, 1046831]
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [498601, 1036156]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [407000, 1046831]
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1045451, 862316]
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [1045451, 862316]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [451748, 1048076]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [451748, 1048076]
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [720726, 1046382]
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [720726, 1046382]
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [794704, 1047863]
+processed_samples 2600 unjoint_samples 2600 joint_samples 6 [794704, 1047863]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1046954, 2306]
+processed_samples 2600 unjoint_samples 2600 joint_samples 7 [1046954, 2306]
+[h264] mmco: unref short failure (repeated many times across multiple decoder instances)
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1047683, 73206]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1046954, 267451]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1047683, 73206]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1046954, 267451]
+processed_samples 2700 unjoint_samples 2700 joint_samples 6 [1046853, 682166]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [699988, 1046831]
+processed_samples 2700 unjoint_samples 2700 joint_samples 6 [1046853, 682166]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [699988, 1046831]
+processed_samples 2700 unjoint_samples 2700 joint_samples 6 [872404, 1036156]
+processed_samples 2700 unjoint_samples 2700 joint_samples 6 [872404, 1036156]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1046844, 147073]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [1046844, 147073]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [718474, 1048076]
+processed_samples 2700 unjoint_samples 2700 joint_samples 7 [718474, 1048076]
+processed_samples 2700 unjoint_samples 2700 joint_samples 6 [1002782, 1046382]
+processed_samples 2700 unjoint_samples 2700 joint_samples 6 [1002782, 1046382]
+[h264] mmco: unref short failure (repeated many times across multiple decoder instances)
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [259588, 1048343]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1046844, 678739]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [259588, 1048343]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1046844, 678739]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [144485, 1038293]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [144485, 1038293]
+processed_samples 2800 unjoint_samples 2800 joint_samples 8 [1039826, 45275]
+processed_samples 2800 unjoint_samples 2800 joint_samples 8 [1039826, 45275]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1047683, 367654]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1047683, 367654]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1046954, 594802]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [1046954, 594802]
+processed_samples 2800 unjoint_samples 2800 joint_samples 6 [1046853, 1036028]
+processed_samples 2800 unjoint_samples 2800 joint_samples 6 [1046853, 1036028]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [987764, 1046831]
+processed_samples 2800 unjoint_samples 2800 joint_samples 7 [987764, 1046831]
+[h264] mmco: unref short failure (repeated many times across multiple decoder instances)
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [457632, 1038293]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1046853, 298226]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1046853, 298226]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [457632, 1038293]
+processed_samples 2900 unjoint_samples 2900 joint_samples 8 [1046667, 219594]
+processed_samples 2900 unjoint_samples 2900 joint_samples 8 [1046667, 219594]
+processed_samples 2900 unjoint_samples 2900 joint_samples 8 [1039826, 310004]
+processed_samples 2900 unjoint_samples 2900 joint_samples 8 [1039826, 310004]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1047683, 671855]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1047683, 671855]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [536449, 1048343]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [536449, 1048343]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1046954, 947346]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1046954, 947346]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1046844, 1031835]
+processed_samples 2900 unjoint_samples 2900 joint_samples 7 [1046844, 1031835]
+[h264] mmco: unref short failure (repeated many times across multiple decoder instances)
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [41462, 1033936]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [41462, 1033936]
+processed_samples 3000 unjoint_samples 3000 joint_samples 7 [1046853, 630274]
+processed_samples 3000 unjoint_samples 3000 joint_samples 7 [1046853, 630274]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [1046667, 565715]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [1046667, 565715]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [1046844, 270612]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [1046844, 270612]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [254890, 1030061]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [254890, 1030061]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [1039826, 590326]
+processed_samples 3000 unjoint_samples 3000 joint_samples 8 [1039826, 590326]
+processed_samples 3000 unjoint_samples 3000 joint_samples 7 [971611, 1048343]
+processed_samples 3000 unjoint_samples 3000 joint_samples 7 [826135, 1038293]
+processed_samples 3000 unjoint_samples 3000 joint_samples 7 [826135, 1038293]
+processed_samples 3000 unjoint_samples 3000 joint_samples 7 [971611, 1048343]
+[h264] mmco: unref short failure (repeated many times across multiple decoder instances)
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1037342, 32673]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1037342, 32673]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [353032, 1033936]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [353032, 1033936]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1047448, 331316]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1047448, 331316]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1046844, 566530]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1046844, 566530]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1046667, 887988]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1046667, 887988]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1039826, 952341]
+processed_samples 3100 unjoint_samples 3100 joint_samples 7 [1046853, 919328]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [1039826, 952341]
+processed_samples 3100 unjoint_samples 3100 joint_samples 7 [1046853, 919328]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [611933, 1030061]
+processed_samples 3100 unjoint_samples 3100 joint_samples 8 [611933, 1030061]
+[h264] mmco: unref short failure (repeated many times across multiple decoder instances)
+processed_samples 3200 unjoint_samples 3200 joint_samples 9 [228506, 1035076]
+processed_samples 3200 unjoint_samples 3200 joint_samples 9 [228506, 1035076]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1037342, 334368]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1037342, 334368]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [665397, 1033936]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [665397, 1033936]
+processed_samples 3200 unjoint_samples 3200 joint_samples 9 [129783, 1046111]
+processed_samples 3200 unjoint_samples 3200 joint_samples 9 [129783, 1046111]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [143884, 1046452]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [143884, 1046452]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [987655, 1030061]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [987655, 1030061]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1047448, 624103]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1047448, 624103]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1046844, 928033]
+processed_samples 3200 unjoint_samples 3200 joint_samples 8 [1046844, 928033]
+[h264] mmco: unref short failure (repeated many times across multiple decoder instances)
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [470473, 1035076]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [470473, 1035076]
+processed_samples 3300 unjoint_samples 3300 joint_samples 8 [1037342, 739217]
+processed_samples 3300 unjoint_samples 3300 joint_samples 8 [1037342, 739217]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [187075, 1046655]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [187075, 1046655]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [243781, 1030316]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [243781, 1030316]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [564632, 1046111]
+processed_samples 3300 unjoint_samples 3300 joint_samples 8 [425409, 1046452]
+processed_samples 3300 unjoint_samples 3300 joint_samples 8 [425409, 1046452]
+processed_samples 3300 unjoint_samples 3300 joint_samples 9 [564632, 1046111]
+processed_samples 3300 unjoint_samples 3300 joint_samples 8 [1047448, 1004186]
+processed_samples 3300 unjoint_samples 3300 joint_samples 8 [1047448, 1004186]
+processed_samples 3300 unjoint_samples 3300 joint_samples 8 [976993, 1033936]
+processed_samples 3300 unjoint_samples 3300 joint_samples 8 [976993, 1033936]
+[h264] mmco: unref short failure (repeated many times across multiple decoder instances)
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [316772, 1047089]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [316772, 1047089]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1047448, 254987]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1047448, 254987]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [718021, 1035076]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [903133, 1046111]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [718021, 1035076]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1047465, 53309]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [1047465, 53309]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [903133, 1046111]
+processed_samples 3400 unjoint_samples 3400 joint_samples 8 [696515, 1046452]
+processed_samples 3400 unjoint_samples 3400 joint_samples 8 [696515, 1046452]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [511852, 1030316]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [511852, 1030316]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [430197, 1046655]
+processed_samples 3400 unjoint_samples 3400 joint_samples 9 [430197, 1046655]
+[h264] mmco: unref short failure (repeated many times across multiple decoder instances)
0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +processed_samples 3500 unjoint_samples 3500 joint_samples 8 [1016881, 1046452] +processed_samples 3500 unjoint_samples 3500 joint_samples 8 [1016881, 1046452] +processed_samples 3500 unjoint_samples 3500 joint_samples 10 [1036551, 173762] +processed_samples 3500 unjoint_samples 3500 joint_samples 10 [1036551, 173762] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1047448, 699524] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1047448, 699524] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [672029, 1046655] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [672029, 1046655] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1047465, 390831] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1047465, 390831] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [961905, 1030316] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [961905, 1030316] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [682848, 1047089] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [682848, 1047089] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1007168, 1035076] +processed_samples 3500 unjoint_samples 3500 joint_samples 9 [1007168, 1035076] +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed4d30700] mmco: unref short failure +[h264 @ 0x55eed3a68300] mmco: unref short failure +[h264 @ 0x55eed3a68300] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x55eed5deb2c0] mmco: unref short failure +[h264 @ 0x55eed5deb2c0] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x56387538acc0] mmco: unref short failure +[h264 @ 0x56387538acc0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short 
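[Note: the "[h264 @ 0x...] mmco: unref short failure" lines collapsed above are reference-management warnings from FFmpeg's libavcodec H.264 decoder; they usually point at truncated or corrupted frames in individual video samples, and decoding continues, so they are noisy but normally non-fatal. If their volume needs to be tracked rather than read line by line, a small tally over the captured log is enough. The sketch below is not part of the training pipeline; the path "train.log" is illustrative.]

    import re
    from collections import Counter

    # Matches decoder lines such as: [h264 @ 0x563878d3f3c0] mmco: unref short failure
    H264_WARNING = re.compile(r"\[h264 @ 0x[0-9a-f]+\]\s+(.+)")

    def summarize_h264_warnings(log_path):
        counts = Counter()
        with open(log_path, errors="replace") as fh:
            for line in fh:
                match = H264_WARNING.search(line)
                if match:
                    counts[match.group(1).strip()] += 1
        return counts

    if __name__ == "__main__":
        # Print each distinct decoder message with its occurrence count.
        for message, n in summarize_h264_warnings("train.log").most_common():
            print(f"{n:8d}  {message}")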
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1046925, 176155]
+processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1046925, 176155]
+processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1046977, 255944]
+processed_samples 3600 unjoint_samples 3600 joint_samples 9 [183095, 1046452]
+processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1036551, 437780]
+processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1046977, 255944]
+processed_samples 3600 unjoint_samples 3600 joint_samples 9 [183095, 1046452]
+processed_samples 3600 unjoint_samples 3600 joint_samples 10 [1036551, 437780]
+processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1047465, 726506]
+processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1047448, 1005332]
+processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1047465, 726506]
+processed_samples 3600 unjoint_samples 3600 joint_samples 9 [1047448, 1005332]
+processed_samples 3600 unjoint_samples 3600 joint_samples 9 [985780, 1046655]
+processed_samples 3600 unjoint_samples 3600 joint_samples 9 [985780, 1046655]
+processed_samples 3600 unjoint_samples 3600 joint_samples 9 [980884, 1047089]
+processed_samples 3600 unjoint_samples 3600 joint_samples 9 [980884, 1047089]
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 3700 unjoint_samples 3700 joint_samples 9 [1047465, 1008008]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1046925, 448638]
+processed_samples 3700 unjoint_samples 3700 joint_samples 9 [1047465, 1008008]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1046925, 448638]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [234658, 1047089]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [234658, 1047089]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1047448, 270839]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1047448, 270839]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1045933, 183516]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1045933, 183516]
+processed_samples 3700 unjoint_samples 3700 joint_samples 9 [583950, 1046452]
+processed_samples 3700 unjoint_samples 3700 joint_samples 9 [583950, 1046452]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1036551, 756280]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1046977, 488099]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1046977, 488099]
+processed_samples 3700 unjoint_samples 3700 joint_samples 10 [1036551, 756280]
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 3800 unjoint_samples 3800 joint_samples 11 [1036551, 62591]
+processed_samples 3800 unjoint_samples 3800 joint_samples 11 [1036551, 62591]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [268053, 1023502]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [268053, 1023502]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [518429, 1047089]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [518429, 1047089]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1047448, 499445]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1047448, 499445]
+processed_samples 3800 unjoint_samples 3800 joint_samples 9 [944387, 1046452]
+processed_samples 3800 unjoint_samples 3800 joint_samples 9 [944387, 1046452]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1045933, 553627]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1045933, 553627]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1046925, 702670]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1046925, 702670]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1046977, 951752]
+processed_samples 3800 unjoint_samples 3800 joint_samples 10 [1046977, 951752]
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 3900 unjoint_samples 3900 joint_samples 10 [766460, 1023502]
+processed_samples 3900 unjoint_samples 3900 joint_samples 11 [171286, 1045745]
+processed_samples 3900 unjoint_samples 3900 joint_samples 10 [766460, 1023502]
+processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1047153, 301241]
+processed_samples 3900 unjoint_samples 3900 joint_samples 11 [171286, 1045745]
+processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1047153, 301241]
+processed_samples 3900 unjoint_samples 3900 joint_samples 11 [1036551, 465998]
+processed_samples 3900 unjoint_samples 3900 joint_samples 10 [763186, 1047089]
+processed_samples 3900 unjoint_samples 3900 joint_samples 11 [1036551, 465998]
+processed_samples 3900 unjoint_samples 3900 joint_samples 11 [1046977, 287476]
+processed_samples 3900 unjoint_samples 3900 joint_samples 11 [1046977, 287476]
+processed_samples 3900 unjoint_samples 3900 joint_samples 10 [763186, 1047089]
+processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1047448, 938873]
+processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1047448, 938873]
+processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1045933, 894861]
+processed_samples 3900 unjoint_samples 3900 joint_samples 10 [1045933, 894861]
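[Note: the "processed_samples ... unjoint_samples ... joint_samples ... [a, b]" lines above are the data loader's progress counters, printed once per rank. Their exact semantics are not documented in this log, so the sketch below only splits a line into named fields and treats the bracketed pair as opaque counters; the function and field names are illustrative, not taken from the pipeline.]

    import re

    # Example line:
    #   processed_samples 3900 unjoint_samples 3900 joint_samples 10 [766460, 1023502]
    PROGRESS = re.compile(
        r"processed_samples (\d+) unjoint_samples (\d+) "
        r"joint_samples (\d+) \[(\d+), (\d+)\]"
    )

    def parse_progress(line):
        match = PROGRESS.search(line)
        if not match:
            return None
        processed, unjoint, joint, a, b = map(int, match.groups())
        return {
            "processed_samples": processed,
            "unjoint_samples": unjoint,
            "joint_samples": joint,
            "counters": (a, b),  # meaning of the bracketed pair is not stated in the log
        }

    print(parse_progress(
        "processed_samples 3900 unjoint_samples 3900 joint_samples 10 [766460, 1023502]"
    ))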
[... repeated "mmco: unref short failure" and "Missing reference picture, default is 65530" decoder warnings omitted ...]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [440025, 1045745]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [440025, 1045745]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [109311, 1038069]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [109311, 1038069]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1047457, 13908]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1047457, 13908]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [336552, 1046915]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [336552, 1046915]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1036551, 744035]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1036551, 744035]
+processed_samples 4000 unjoint_samples 4000 joint_samples 10 [1047153, 613835]
+processed_samples 4000 unjoint_samples 4000 joint_samples 10 [1047153, 613835]
+processed_samples 4000 unjoint_samples 4000 joint_samples 10 [1034242, 1047089]
+processed_samples 4000 unjoint_samples 4000 joint_samples 10 [1034242, 1047089]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1046977, 486387]
+processed_samples 4000 unjoint_samples 4000 joint_samples 11 [1046977, 486387]
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1047457, 290751]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1043159, 300989]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1047457, 290751]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1043159, 300989]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [505764, 1038069]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [505764, 1038069]
+processed_samples 4100 unjoint_samples 4100 joint_samples 12 [287929, 1042954]
+processed_samples 4100 unjoint_samples 4100 joint_samples 12 [287929, 1042954]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [708615, 1045745]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [708615, 1045745]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [666810, 1046915]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [666810, 1046915]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1046977, 924415]
+processed_samples 4100 unjoint_samples 4100 joint_samples 11 [1046977, 924415]
+processed_samples 4100 unjoint_samples 4100 joint_samples 10 [1047153, 948619]
+processed_samples 4100 unjoint_samples 4100 joint_samples 10 [1047153, 948619]
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [227020, 1044808]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [227020, 1044808]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1047457, 633655]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1047457, 633655]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1043159, 579146]
+processed_samples 4200 unjoint_samples 4200 joint_samples 12 [293522, 1037622]
+processed_samples 4200 unjoint_samples 4200 joint_samples 12 [293522, 1037622]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1043159, 579146]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1045991, 1045745]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [1045991, 1045745]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [931157, 1038069]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [931157, 1038069]
+processed_samples 4200 unjoint_samples 4200 joint_samples 12 [524484, 1042954]
+processed_samples 4200 unjoint_samples 4200 joint_samples 12 [524484, 1042954]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [964578, 1046915]
+processed_samples 4200 unjoint_samples 4200 joint_samples 11 [964578, 1046915]
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 4300 unjoint_samples 4300 joint_samples 12 [191316, 1042702]
+processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1047457, 949411]
+processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1047457, 949411]
+processed_samples 4300 unjoint_samples 4300 joint_samples 12 [191316, 1042702]
+processed_samples 4300 unjoint_samples 4300 joint_samples 12 [1043819, 261624]
+processed_samples 4300 unjoint_samples 4300 joint_samples 12 [1043819, 261624]
+processed_samples 4300 unjoint_samples 4300 joint_samples 11 [544332, 1044808]
+processed_samples 4300 unjoint_samples 4300 joint_samples 12 [1046317, 323599]
+processed_samples 4300 unjoint_samples 4300 joint_samples 11 [544332, 1044808]
+processed_samples 4300 unjoint_samples 4300 joint_samples 12 [747496, 1037622]
+processed_samples 4300 unjoint_samples 4300 joint_samples 12 [1046317, 323599]
+processed_samples 4300 unjoint_samples 4300 joint_samples 12 [747496, 1037622]
+processed_samples 4300 unjoint_samples 4300 joint_samples 12 [769260, 1042954]
+processed_samples 4300 unjoint_samples 4300 joint_samples 12 [769260, 1042954]
+processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1043159, 1001767]
+processed_samples 4300 unjoint_samples 4300 joint_samples 11 [1043159, 1001767]
[... repeated "mmco: unref short failure" and "co located POCs unavailable" decoder warnings omitted ...]
+/root/miniconda3/envs/py38/lib/python3.8/site-packages/PIL/TiffImagePlugin.py:870: UserWarning: Corrupt EXIF data. Expecting to read 12 bytes but only got 8.
+ warnings.warn(str(msg))
+/root/miniconda3/envs/py38/lib/python3.8/site-packages/PIL/TiffImagePlugin.py:870: UserWarning: Corrupt EXIF data. Expecting to read 12 bytes but only got 8.
+ warnings.warn(str(msg))
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 4400 unjoint_samples 4400 joint_samples 12 [293908, 1036275]
+processed_samples 4400 unjoint_samples 4400 joint_samples 12 [293908, 1036275]
+processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1046317, 597374]
+processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1046317, 597374]
+processed_samples 4400 unjoint_samples 4400 joint_samples 13 [1001551, 125978]
+processed_samples 4400 unjoint_samples 4400 joint_samples 13 [1001551, 125978]
+processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1047457, 244044]
+processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1047457, 244044]
+processed_samples 4400 unjoint_samples 4400 joint_samples 12 [449665, 1042702]
+processed_samples 4400 unjoint_samples 4400 joint_samples 12 [449665, 1042702]
+processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1043819, 550203]
+processed_samples 4400 unjoint_samples 4400 joint_samples 12 [1043819, 550203]
+processed_samples 4400 unjoint_samples 4400 joint_samples 11 [885429, 1044808]
+processed_samples 4400 unjoint_samples 4400 joint_samples 11 [885429, 1044808]
+processed_samples 4400 unjoint_samples 4400 joint_samples 13 [131285, 1042954]
+processed_samples 4400 unjoint_samples 4400 joint_samples 13 [131285, 1042954]
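[Note: the PIL UserWarning above means some training images carry a truncated EXIF block; Pillow still loads the pixel data, so the warning is informational. If the repetition is unwanted, the standard-library warnings filter can silence just this message. Where the filter would be installed (for example, at dataloader worker start-up) is an assumption, not something this log specifies.]

    import warnings

    # Ignore only Pillow's "Corrupt EXIF data ..." UserWarning; other warnings still surface.
    # The message argument is a regex matched against the start of the warning text.
    warnings.filterwarnings(
        "ignore",
        message=r"Corrupt EXIF data",
        category=UserWarning,
    )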
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [174984, 1047222]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [174984, 1047222]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [624662, 1036275]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [624662, 1036275]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1047457, 587580]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1047457, 587580]
+processed_samples 4500 unjoint_samples 4500 joint_samples 13 [461685, 1042954]
+processed_samples 4500 unjoint_samples 4500 joint_samples 13 [461685, 1042954]
+processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1001551, 370967]
+processed_samples 4500 unjoint_samples 4500 joint_samples 13 [1001551, 370967]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1046317, 1041011]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1046317, 1041011]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1043819, 806370]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [1043819, 806370]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [794615, 1042702]
+processed_samples 4500 unjoint_samples 4500 joint_samples 12 [794615, 1042702]
0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1046317, 312149] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1046317, 312149] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1047369, 29147] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1047369, 29147] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [193190, 1031624] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [193190, 1031624] +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [1047457, 880775] +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [1047457, 880775] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [770011, 1042954] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [770011, 1042954] +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [514805, 1047222] +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [514805, 1047222] +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [910010, 1036275] +processed_samples 4600 unjoint_samples 4600 joint_samples 12 [910010, 1036275] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1001551, 674917] +processed_samples 4600 unjoint_samples 4600 joint_samples 13 [1001551, 674917] +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref 
short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563878ceacc0] mmco: unref short failure +[h264 @ 0x563878ceacc0] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3bc9740] mmco: unref short failure +[h264 @ 0x55eed3bc9740] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eedc0ef980] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 
0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +processed_samples 4700 unjoint_samples 4700 joint_samples 14 [1027709, 60995] +processed_samples 4700 unjoint_samples 4700 joint_samples 14 [1027709, 60995] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [127447, 1046565] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [127447, 1046565] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [445281, 1031624] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [445281, 1031624] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [238435, 1046706] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [238435, 1046706] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1046317, 567609] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1046317, 567609] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1047369, 307205] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1047369, 307205] +processed_samples 4700 unjoint_samples 4700 joint_samples 12 [764771, 1047222] +processed_samples 4700 unjoint_samples 4700 joint_samples 12 [764771, 1047222] +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1001551, 913859] +processed_samples 4700 unjoint_samples 4700 joint_samples 13 [1001551, 913859] +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed3e54b40] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563877070480] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563877070480] mmco: unref 
short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x56387477ee00] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eedbe71b00] mmco: unref short failure +[h264 @ 0x55eedbe71b00] mmco: unref short failure +[h264 @ 0x55eedbe71b00] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed43712c0] mmco: unref short failure +[h264 @ 0x55eed43712c0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563875e2ca80] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 
0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563877070480] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563877070480] mmco: unref short failure +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [14639, 1047222] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [14639, 1047222] +processed_samples 4800 unjoint_samples 4800 joint_samples 14 [57536, 1047267] +processed_samples 4800 unjoint_samples 4800 joint_samples 14 [57536, 1047267] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [485150, 1046706] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [485150, 1046706] +processed_samples 4800 unjoint_samples 4800 joint_samples 14 [1027709, 451093] +processed_samples 4800 unjoint_samples 4800 joint_samples 14 [1027709, 451093] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1046317, 900128] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1046317, 900128] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1047369, 586521] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [1047369, 586521] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [743233, 1031624] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [743233, 1031624] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [489823, 1046565] +processed_samples 4800 unjoint_samples 4800 joint_samples 13 [489823, 1046565] +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x55eedbe71b00] mmco: unref short failure +[h264 @ 0x55eedbe71b00] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short 
failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed43712c0] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed43712c0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed2f56000] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [313284, 1014074] +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [313284, 1014074] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [367898, 1047222] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [367898, 1047222] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [790499, 1046565] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [790499, 1046565] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [362781, 1047267] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [362781, 1047267] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [844500, 1046706] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [1027709, 762134] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1047369, 951098] +processed_samples 4900 unjoint_samples 4900 joint_samples 14 [1027709, 762134] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [844500, 1046706] 
+processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1047369, 951098] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1035471, 1036283] +processed_samples 4900 unjoint_samples 4900 joint_samples 13 [1035471, 1036283] +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x56387530bc40] mmco: unref short failure +[h264 @ 0x56387530bc40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1048063, 41695] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1048063, 41695] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [140084, 1046706] 
+processed_samples 5000 unjoint_samples 5000 joint_samples 14 [140084, 1046706] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1047369, 170398] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1047369, 170398] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [698693, 1014074] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [698693, 1014074] +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x56387d4ad200] mmco: unref short failure +[h264 @ 0x56387d4ad200] mmco: unref short failure +[h264 @ 0x55eed3490980] mmco: unref short failure +[h264 @ 0x55eed3490980] mmco: unref short failure +[h264 @ 0x56387d4ad200] mmco: unref short failure +[h264 @ 0x56387d4ad200] mmco: unref short failure +[h264 @ 0x55eed3490980] mmco: unref short failure +[h264 @ 0x55eed3490980] mmco: unref short failure +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1027709, 1026532] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [1027709, 1026532] +processed_samples 5000 unjoint_samples 5000 joint_samples 13 [668318, 1047222] +processed_samples 5000 unjoint_samples 5000 joint_samples 13 [668318, 1047222] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [316187, 1039542] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [316187, 1039542] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [818283, 1047267] +processed_samples 5000 unjoint_samples 5000 joint_samples 14 [818283, 1047267] +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x56387477ee00] mmco: unref short failure +[h264 @ 0x56387477ee00] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x563876d3cf40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 
0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x56387538acc0] mmco: unref short failure +[h264 @ 0x55eed43712c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [429610, 1046706] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1048063, 273849] +processed_samples 5100 unjoint_samples 5100 joint_samples 15 [1046692, 316401] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1047369, 514652] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [429610, 1046706] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1048063, 273849] +processed_samples 5100 unjoint_samples 5100 joint_samples 15 [1046692, 316401] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [1047369, 514652] +processed_samples 5100 unjoint_samples 5100 joint_samples 15 [87866, 1047267] +processed_samples 5100 unjoint_samples 5100 joint_samples 15 [87866, 1047267] +processed_samples 5100 unjoint_samples 5100 joint_samples 15 [4055, 1047597] +processed_samples 5100 unjoint_samples 5100 joint_samples 13 [1012239, 1047222] +processed_samples 5100 unjoint_samples 5100 joint_samples 13 [1012239, 1047222] +processed_samples 5100 unjoint_samples 5100 joint_samples 15 [4055, 1047597] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [638675, 1039542] +processed_samples 5100 unjoint_samples 5100 joint_samples 14 [638675, 1039542] +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short 
failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x56386988a480] mmco: unref short failure +[h264 @ 0x56386988a480] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x56387538acc0] mmco: unref short failure +[h264 @ 0x55eed48247c0] mmco: unref short failure +[h264 @ 0x55eed48247c0] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x5638786e5a40] [h264 @ 0x55eed3f1cb80] mmco: unref short failure +mmco: unref short failure +[h264 @ 0x5638786e5a40] [h264 @ 0x55eed3f1cb80] mmco: unref short failure +mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x563878fb9640] 
mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x5638753c58c0] mmco: unref short failure +[h264 @ 0x5638753c58c0] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1047369, 775608] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1047369, 775608] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1042611, 321546] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [307706, 1047597] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1042611, 321546] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [808061, 1046706] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1048063, 687707] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [307706, 1047597] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [1048063, 687707] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [808061, 1046706] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [957055, 1039542] +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [1046692, 659174] +processed_samples 5200 unjoint_samples 5200 joint_samples 14 [957055, 1039542] +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [1046692, 659174] +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [351600, 1047267] +[h264 @ 0x563877070480] mmco: unref short failure +[h264 @ 0x563877070480] mmco: unref short failure +processed_samples 5200 unjoint_samples 5200 joint_samples 15 [351600, 1047267] +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x563875069800] mmco: unref short failure +[h264 @ 0x563875069800] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure 
+[h264 @ 0x56387483a1c0] mmco: unref short failure +[h264 @ 0x56387483a1c0] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x56387a424100] mmco: unref short failure +[h264 @ 0x56387a424100] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: 
unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eedb863d80] mmco: unref short failure +[h264 @ 0x55eedb863d80] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1048063, 973905] +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1048063, 973905] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1037961, 206103] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1037961, 206103] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [584142, 1047597] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [584142, 1047597] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1005237, 87076] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [1005237, 87076] +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1042611, 555626] +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1047369, 1045437] +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1042611, 555626] +processed_samples 5300 unjoint_samples 5300 joint_samples 14 [1047369, 1045437] +processed_samples 5300 unjoint_samples 5300 joint_samples 16 [36260, 1025495] +processed_samples 5300 unjoint_samples 5300 joint_samples 16 [36260, 1025495] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [769856, 1047267] +processed_samples 5300 unjoint_samples 5300 joint_samples 15 [769856, 1047267] +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed43cfc80] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed43cfc80] mmco: unref short failure +[h264 @ 0x55eed43cfc80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 
0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x56387636fd40] mmco: unref short failure +[h264 @ 0x56387636fd40] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x5638794f17c0] [h264 @ 0x55eed452d680] mmco: unref short failure +mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed8007e80] [h264 @ 0x563876420f40] mmco: unref short failure +mmco: unref short failure +[h264 @ 0x563876420f40] [h264 @ 0x55eed8007e80] mmco: unref short failure +mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [270259, 1047382] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [270259, 1047382] +processed_samples 5400 
unjoint_samples 5400 joint_samples 16 [12248, 1047267] +processed_samples 5400 unjoint_samples 5400 joint_samples 16 [12248, 1047267] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1005237, 296192] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1005237, 296192] +processed_samples 5400 unjoint_samples 5400 joint_samples 14 [1042611, 931959] +processed_samples 5400 unjoint_samples 5400 joint_samples 14 [1042611, 931959] +processed_samples 5400 unjoint_samples 5400 joint_samples 16 [318320, 1025495] +processed_samples 5400 unjoint_samples 5400 joint_samples 16 [318320, 1025495] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1047369, 387727] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1047369, 387727] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [842742, 1047597] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [842742, 1047597] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1037961, 527463] +processed_samples 5400 unjoint_samples 5400 joint_samples 15 [1037961, 527463] +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x563877070480] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 
0x55eed425b900] mmco: unref short failure
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 5500 unjoint_samples 5500 joint_samples 16 [1044752, 104133]
+processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1047274, 137380]
+processed_samples 5500 unjoint_samples 5500 joint_samples 15 [624529, 1047382]
+processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1005237, 542490]
+processed_samples 5500 unjoint_samples 5500 joint_samples 16 [395159, 1047267]
+processed_samples 5500 unjoint_samples 5500 joint_samples 16 [607509, 1025495]
+processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1047369, 921402]
+processed_samples 5500 unjoint_samples 5500 joint_samples 15 [1037961, 918670]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 5600 unjoint_samples 5600 joint_samples 15 [1047274, 478589]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [1044752, 328770]
+processed_samples 5600 unjoint_samples 5600 joint_samples 15 [1005237, 924618]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [1040679, 338984]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [217836, 1039334]
+processed_samples 5600 unjoint_samples 5600 joint_samples 15 [847934, 1047382]
+processed_samples 5600 unjoint_samples 5600 joint_samples 16 [628115, 1047267]
+processed_samples 5600 unjoint_samples 5600 joint_samples 17 [1006719, 69480]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 5700 unjoint_samples 5700 joint_samples 15 [1047274, 721542]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1047100, 87698]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [472345, 1039334]
+processed_samples 5700 unjoint_samples 5700 joint_samples 17 [1006719, 463298]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1025063, 357384]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1040679, 691494]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1023300, 1047267]
+processed_samples 5700 unjoint_samples 5700 joint_samples 16 [1044752, 600250]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1025063, 643951]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1047100, 519950]
+processed_samples 5800 unjoint_samples 5800 joint_samples 17 [1045473, 405677]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1044752, 907529]
+processed_samples 5800 unjoint_samples 5800 joint_samples 17 [1006719, 830940]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [1040679, 1015156]
+processed_samples 5800 unjoint_samples 5800 joint_samples 16 [746923, 1039334]
+processed_samples 5800 unjoint_samples 5800 joint_samples 15 [1047274, 996883]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [1045687, 105192]
+processed_samples 5900 unjoint_samples 5900 joint_samples 16 [1047100, 792425]
+processed_samples 5900 unjoint_samples 5900 joint_samples 16 [1025063, 916269]
+processed_samples 5900 unjoint_samples 5900 joint_samples 16 [231927, 1046371]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [1044093, 384990]
+processed_samples 5900 unjoint_samples 5900 joint_samples 17 [1045473, 680318]
+processed_samples 5900 unjoint_samples 5900 joint_samples 18 [81281, 1044774]
+processed_samples 5900 unjoint_samples 5900 joint_samples 16 [993895, 1039334]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [262553, 1031102]
+processed_samples 6000 unjoint_samples 6000 joint_samples 18 [378380, 1044774]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1007274, 307018]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [120829, 1045731]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1045687, 448002]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1044093, 627326]
+processed_samples 6000 unjoint_samples 6000 joint_samples 16 [556176, 1046371]
+processed_samples 6000 unjoint_samples 6000 joint_samples 17 [1045473, 1006055]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 6100 unjoint_samples 6100 joint_samples 18 [287605, 1036668]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [468061, 1045731]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [579535, 1031102]
+processed_samples 6100 unjoint_samples 6100 joint_samples 16 [879681, 1046371]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [1044093, 940303]
+processed_samples 6100 unjoint_samples 6100 joint_samples 18 [668060, 1044774]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [1007274, 544842]
+processed_samples 6100 unjoint_samples 6100 joint_samples 17 [1045687, 678841]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 6200 unjoint_samples 6200 joint_samples 18 [538447, 1036668]
+processed_samples 6200 unjoint_samples 6200 joint_samples 17 [1007274, 928763]
+processed_samples 6200 unjoint_samples 6200 joint_samples 18 [1046805, 142619]
+processed_samples 6200 unjoint_samples 6200 joint_samples 17 [1045575, 211825]
+processed_samples 6200 unjoint_samples 6200 joint_samples 17 [899115, 1031102]
+processed_samples 6200 unjoint_samples 6200 joint_samples 17 [796923, 1045731]
+processed_samples 6200 unjoint_samples 6200 joint_samples 17 [1045687, 963141]
+processed_samples 6200 unjoint_samples 6200 joint_samples 18 [951978, 1044774]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 6300 unjoint_samples 6300 joint_samples 17 [1045575, 454663]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1046556, 138211]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [16713, 1047368]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1047756, 199475]
+processed_samples 6300 unjoint_samples 6300 joint_samples 19 [164419, 1047073]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [230684, 1047851]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [1046805, 464079]
+processed_samples 6300 unjoint_samples 6300 joint_samples 18 [883881, 1036668]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1047756, 484901]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [482025, 1047368]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1046556, 480966]
+processed_samples 6400 unjoint_samples 6400 joint_samples 19 [503506, 1047073]
+processed_samples 6400 unjoint_samples 6400 joint_samples 19 [119053, 1039175]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [1046805, 740321]
+processed_samples 6400 unjoint_samples 6400 joint_samples 17 [1045575, 705927]
+processed_samples 6400 unjoint_samples 6400 joint_samples 18 [526595, 1047851]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 6500 unjoint_samples 6500 joint_samples 19 [1047225, 41559]
+processed_samples 6500 unjoint_samples 6500 joint_samples 19 [871854, 1047073]
+processed_samples 6500 unjoint_samples 6500 joint_samples 18 [818728, 1047851]
+processed_samples 6500 unjoint_samples 6500 joint_samples 18 [1046556, 956307]
+processed_samples 6500 unjoint_samples 6500 joint_samples 18 [812646, 1047368]
+processed_samples 6500 unjoint_samples 6500 joint_samples 19 [418584, 1039175]
+processed_samples 6500 unjoint_samples 6500 joint_samples 17 [1045575, 960889]
+processed_samples 6500 unjoint_samples 6500 joint_samples 18 [1047756, 769212]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1046948, 34004]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [289538, 1047168]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1038459, 116258]
+processed_samples 6600 unjoint_samples 6600 joint_samples 20 [84298, 1047073]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1047225, 320244]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [1046556, 203116]
+processed_samples 6600 unjoint_samples 6600 joint_samples 19 [767426, 1039175]
+processed_samples 6600 unjoint_samples 6600 joint_samples 18 [268660, 1043489]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1047225, 688463]
+processed_samples 6700 unjoint_samples 6700 joint_samples 20 [1041176, 41754]
+processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1046948, 404228]
+processed_samples 6700 unjoint_samples 6700 joint_samples 19 [629833, 1047168]
+processed_samples 6700 unjoint_samples 6700 joint_samples 18 [646251, 1043489]
+processed_samples 6700 unjoint_samples 6700 joint_samples 20 [380920, 1047073]
+processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1038459, 406306]
+processed_samples 6700 unjoint_samples 6700 joint_samples 19 [1046556, 529886]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+processed_samples 6800 unjoint_samples 6800 joint_samples 20 [66310, 1018703]
+processed_samples 6800 unjoint_samples 6800 joint_samples 19 [1046948, 673482]
+processed_samples 6800 unjoint_samples 6800 joint_samples 18 [1043901, 1044212]
+processed_samples 6800 unjoint_samples 6800 joint_samples 19 [1038459, 780251]
+processed_samples 6800 unjoint_samples 6800 joint_samples 20 [1041176, 460480]
+processed_samples 6800 unjoint_samples 6800 joint_samples 20 [731424, 1047073]
+processed_samples 6800 unjoint_samples 6800 joint_samples 19 [1046556, 871148]
+processed_samples 6800 unjoint_samples 6800 joint_samples 20 [3606, 1047168]
+[h264] mmco: unref short failure (repeated warnings from multiple h264 decoder contexts)
+[h264 @ 
0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563874b7b400] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +processed_samples 6900 unjoint_samples 6900 joint_samples 19 [281635, 1046468] +processed_samples 6900 unjoint_samples 6900 joint_samples 19 [281635, 1046468] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1047656, 162571] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1047656, 162571] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [72621, 1046602] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [72621, 1046602] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [329524, 1018703] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [329524, 1018703] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [452684, 1047168] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [452684, 1047168] +processed_samples 6900 unjoint_samples 6900 joint_samples 19 [1046948, 1046894] +processed_samples 6900 unjoint_samples 6900 joint_samples 19 [1046948, 1046894] +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1041176, 804170] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [1041176, 804170] +[h264 @ 0x55eed77a6380] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short 
failure +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [981242, 1047073] +processed_samples 6900 unjoint_samples 6900 joint_samples 20 [981242, 1047073] +[h264 @ 0x56387b3eca00] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 
0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [621337, 1018703] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [621337, 1018703] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1047656, 419545] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [1047656, 419545] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [322237, 1046602] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [250666, 1046948] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [250666, 1046948] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [322237, 1046602] +processed_samples 7000 unjoint_samples 7000 joint_samples 21 [1034547, 332901] +processed_samples 7000 unjoint_samples 7000 joint_samples 21 [1034547, 332901] +processed_samples 7000 unjoint_samples 7000 joint_samples 21 [196083, 944571] +processed_samples 7000 unjoint_samples 7000 joint_samples 21 [196083, 944571] +processed_samples 7000 unjoint_samples 7000 joint_samples 19 [693533, 1046468] +processed_samples 7000 unjoint_samples 7000 joint_samples 19 [693533, 1046468] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [722950, 1047168] +processed_samples 7000 unjoint_samples 7000 joint_samples 20 [722950, 1047168] +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref 
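The "[h264 @ 0x...] mmco: unref short failure" messages interleaved with the progress lines above are emitted by FFmpeg's H.264 decoder (mmco = memory management control operation) while video clips are decoded for the dataloader; they are warnings, not training failures. If the video reader behind this pipeline is PyAV (an assumption; the log does not identify which reader is used), a minimal sketch for quieting them looks like the following, with illustrative names only:

    # Hedged sketch: suppress libav/FFmpeg decoder chatter such as
    # "mmco: unref short failure" when decoding clips with PyAV.
    # Assumption: the dataloader uses PyAV; adapt for another reader.
    import av
    import av.logging

    # Show only ERROR and above; the mmco messages sit below that level.
    av.logging.set_level(av.logging.ERROR)

    def count_frames(path: str) -> int:
        """Decode one clip and return its frame count (decoder warnings now hidden)."""
        with av.open(path) as container:
            return sum(1 for _ in container.decode(video=0))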
+processed_samples 7100 unjoint_samples 7100 joint_samples 20 [983360, 1047168]
+processed_samples 7100 unjoint_samples 7100 joint_samples 20 [592795, 1046602]
+processed_samples 7100 unjoint_samples 7100 joint_samples 19 [986311, 1046468]
+processed_samples 7100 unjoint_samples 7100 joint_samples 21 [573248, 944571]
+processed_samples 7100 unjoint_samples 7100 joint_samples 20 [626696, 1046948]
+processed_samples 7100 unjoint_samples 7100 joint_samples 20 [1019263, 1018703]
+processed_samples 7100 unjoint_samples 7100 joint_samples 20 [1047656, 761197]
+processed_samples 7100 unjoint_samples 7100 joint_samples 21 [1034547, 797804]
[... repeated h264 "mmco: unref short failure" decoder warnings ...]
+processed_samples 7200 unjoint_samples 7200 joint_samples 21 [507340, 1047168]
+processed_samples 7200 unjoint_samples 7200 joint_samples 20 [1043957, 332926]
+processed_samples 7200 unjoint_samples 7200 joint_samples 21 [137166, 1039172]
+processed_samples 7200 unjoint_samples 7200 joint_samples 22 [300744, 971234]
+processed_samples 7200 unjoint_samples 7200 joint_samples 21 [197544, 1048528]
+processed_samples 7200 unjoint_samples 7200 joint_samples 20 [879867, 1046602]
+processed_samples 7200 unjoint_samples 7200 joint_samples 20 [880272, 1046948]
+processed_samples 7200 unjoint_samples 7200 joint_samples 21 [891435, 944571]
[... repeated h264 "mmco: unref short failure" decoder warnings ...]
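The "processed_samples ... unjoint_samples ... joint_samples ... [a, b]" lines above follow one fixed format: a cumulative sample counter repeated as unjoint_samples, a joint_samples index, and a bracketed pair of counters whose exact meaning the log itself does not spell out. A minimal sketch for pulling these checkpoints out of a log file (helper names are illustrative, not from the training code):

    # Hedged sketch: collect the "processed_samples ..." progress lines from this log.
    # The regex mirrors the line format visible above; it does not interpret the
    # bracketed counters beyond reading them as integers.
    import re
    from collections import defaultdict

    PROGRESS_RE = re.compile(
        r"processed_samples (\d+) unjoint_samples (\d+) "
        r"joint_samples (\d+) \[(\d+), (\d+)\]"
    )

    def collect_progress(log_text: str) -> dict:
        """Group (joint_samples, a, b) tuples by their processed_samples checkpoint."""
        checkpoints = defaultdict(list)
        for m in PROGRESS_RE.finditer(log_text):
            processed, unjoint, joint, a, b = (int(g) for g in m.groups())
            checkpoints[processed].append((joint, a, b))
        return dict(checkpoints)

    if __name__ == "__main__":
        example = "processed_samples 7200 unjoint_samples 7200 joint_samples 21 [507340, 1047168]"
        print(collect_progress(example))  # {7200: [(21, 507340, 1047168)]}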
+processed_samples 7300 unjoint_samples 7300 joint_samples 21 [455349, 1048528]
+processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1032635, 185097]
+processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1041279, 130666]
+processed_samples 7300 unjoint_samples 7300 joint_samples 21 [394850, 1039172]
+processed_samples 7300 unjoint_samples 7300 joint_samples 21 [756527, 1047168]
+processed_samples 7300 unjoint_samples 7300 joint_samples 20 [1043957, 659687]
+processed_samples 7300 unjoint_samples 7300 joint_samples 22 [586568, 971234]
+processed_samples 7300 unjoint_samples 7300 joint_samples 21 [1043219, 1044320]
[... repeated h264 "mmco: unref short failure" decoder warnings ...]
+processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1041279, 430100]
+processed_samples 7400 unjoint_samples 7400 joint_samples 21 [1032635, 436399]
+processed_samples 7400 unjoint_samples 7400 joint_samples 20 [1043957, 1013418]
+processed_samples 7400 unjoint_samples 7400 joint_samples 22 [2263, 1047813]
+processed_samples 7400 unjoint_samples 7400 joint_samples 22 [1047020, 298128]
+processed_samples 7400 unjoint_samples 7400 joint_samples 21 [642775, 1039172]
+processed_samples 7400 unjoint_samples 7400 joint_samples 21 [731163, 1048528]
+processed_samples 7400 unjoint_samples 7400 joint_samples 22 [845892, 971234]
[... repeated h264 "mmco: unref short failure" decoder warnings ...]
+processed_samples 7500 unjoint_samples 7500 joint_samples 22 [457080, 1047813]
+processed_samples 7500 unjoint_samples 7500 joint_samples 21 [338737, 1044785]
+processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1032635, 675466]
+processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1041279, 728490]
+processed_samples 7500 unjoint_samples 7500 joint_samples 22 [1047020, 639129]
+processed_samples 7500 unjoint_samples 7500 joint_samples 21 [973361, 1039172]
+processed_samples 7500 unjoint_samples 7500 joint_samples 23 [68261, 1046730]
+processed_samples 7500 unjoint_samples 7500 joint_samples 21 [1044270, 1048528]
[... repeated h264 "mmco: unref short failure" decoder warnings ...]
+processed_samples 7600 unjoint_samples 7600 joint_samples 22 [807319, 1047813]
+processed_samples 7600 unjoint_samples 7600 joint_samples 21 [639035, 1044785]
+processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1033292, 240879]
+processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1046369, 305154]
+processed_samples 7600 unjoint_samples 7600 joint_samples 23 [390632, 1046730]
+processed_samples 7600 unjoint_samples 7600 joint_samples 22 [1047020, 964088]
+processed_samples 7600 unjoint_samples 7600 joint_samples 22 [3846, 1046930]
+processed_samples 7600 unjoint_samples 7600 joint_samples 21 [1032635, 943463]
[... repeated h264 "mmco: unref short failure" decoder warnings ...]
+processed_samples 7700 unjoint_samples 7700 joint_samples 23 [1040856, 102043]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [1032635, 192058]
+processed_samples 7700 unjoint_samples 7700 joint_samples 23 [234770, 1040573]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [248947, 1046930]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [1046369, 626545]
+processed_samples 7700 unjoint_samples 7700 joint_samples 23 [727089, 1046730]
+processed_samples 7700 unjoint_samples 7700 joint_samples 21 [999587, 1044785]
+processed_samples 7700 unjoint_samples 7700 joint_samples 22 [1033292, 560503]
[... repeated h264 "mmco: unref short failure" decoder warnings ...]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [1048448, 209741]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [1032635, 461524]
+processed_samples 7800 unjoint_samples 7800 joint_samples 23 [1040856, 433188]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [626144, 1046930]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [1046369, 964942]
+processed_samples 7800 unjoint_samples 7800 joint_samples 23 [544996, 1040573]
+processed_samples 7800 unjoint_samples 7800 joint_samples 22 [1033292, 882664]
+processed_samples 7800 unjoint_samples 7800 joint_samples 23 [981404, 1046730]
[... repeated h264 "mmco: unref short failure" decoder warnings ...]
+[h264
+processed_samples 7900 unjoint_samples 7900 joint_samples 22 [1048448, 471280]
+processed_samples 7900 unjoint_samples 7900 joint_samples 22 [1032635, 705558]
+processed_samples 7900 unjoint_samples 7900 joint_samples 24 [1040056, 246076]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [169502, 1047398]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [1046582, 143476]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [1040856, 825511]
+processed_samples 7900 unjoint_samples 7900 joint_samples 23 [791886, 1040573]
+processed_samples 7900 unjoint_samples 7900 joint_samples 22 [863691, 1046930]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [441824, 1047398]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [1045827, 179421]
+processed_samples 8000 unjoint_samples 8000 joint_samples 24 [101577, 1030643]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [57691, 1039685]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [1046582, 501674]
+processed_samples 8000 unjoint_samples 8000 joint_samples 24 [1040056, 521786]
+processed_samples 8000 unjoint_samples 8000 joint_samples 22 [1048448, 841428]
+processed_samples 8000 unjoint_samples 8000 joint_samples 23 [1028931, 1040573]
+processed_samples 8100 unjoint_samples 8100 joint_samples 23 [1045827, 482039]
+processed_samples 8100 unjoint_samples 8100 joint_samples 23 [45763, 1047676]
+processed_samples 8100 unjoint_samples 8100 joint_samples 23 [336327, 1039685]
+processed_samples 8100 unjoint_samples 8100 joint_samples 24 [465012, 1030643]
+processed_samples 8100 unjoint_samples 8100 joint_samples 24 [1040056, 778852]
+processed_samples 8100 unjoint_samples 8100 joint_samples 24 [1035485, 296399]
+processed_samples 8100 unjoint_samples 8100 joint_samples 23 [728021, 1047398]
+processed_samples 8100 unjoint_samples 8100 joint_samples 23 [1046582, 773892]
+processed_samples 8200 unjoint_samples 8200 joint_samples 23 [297730, 1047676]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [1046985, 24617]
+processed_samples 8200 unjoint_samples 8200 joint_samples 25 [41605, 1045603]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [196682, 1004216]
+processed_samples 8200 unjoint_samples 8200 joint_samples 23 [576045, 1039685]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [1035485, 545780]
+processed_samples 8200 unjoint_samples 8200 joint_samples 24 [822440, 1030643]
+processed_samples 8200 unjoint_samples 8200 joint_samples 23 [1045827, 835419]
+processed_samples 8300 unjoint_samples 8300 joint_samples 25 [23785, 1043497]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1046454, 65690]
+processed_samples 8300 unjoint_samples 8300 joint_samples 25 [311873, 1045603]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1046985, 370352]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [474627, 1004216]
+processed_samples 8300 unjoint_samples 8300 joint_samples 23 [877732, 1039685]
+processed_samples 8300 unjoint_samples 8300 joint_samples 24 [1035485, 819620]
+processed_samples 8300 unjoint_samples 8300 joint_samples 23 [571243, 1047676]
+processed_samples 8400 unjoint_samples 8400 joint_samples 25 [330465, 1043497]
+processed_samples 8400 unjoint_samples 8400 joint_samples 24 [77962, 1047593]
+processed_samples 8400 unjoint_samples 8400 joint_samples 24 [1046985, 737460]
+processed_samples 8400 unjoint_samples 8400 joint_samples 25 [558496, 1045603]
+processed_samples 8400 unjoint_samples 8400 joint_samples 25 [105057, 1025364]
+processed_samples 8400 unjoint_samples 8400 joint_samples 24 [1046454, 417698]
+processed_samples 8400 unjoint_samples 8400 joint_samples 24 [782918, 1004216]
+processed_samples 8400 unjoint_samples 8400 joint_samples 23 [792017, 1047676]
+processed_samples 8500 unjoint_samples 8500 joint_samples 24 [1046919, 25226]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [690520, 1043497]
+processed_samples 8500 unjoint_samples 8500 joint_samples 24 [396632, 1047593]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [405585, 1025364]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [1046791, 19676]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [75565, 1027907]
+processed_samples 8500 unjoint_samples 8500 joint_samples 25 [927059, 1045603]
+processed_samples 8500 unjoint_samples 8500 joint_samples 24 [1046454, 720788]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [1027785, 1043497]
+processed_samples 8600 unjoint_samples 8600 joint_samples 26 [223017, 1045603]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [1046791, 329223]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [421672, 1027907]
+processed_samples 8600 unjoint_samples 8600 joint_samples 24 [1046919, 305918]
+processed_samples 8600 unjoint_samples 8600 joint_samples 25 [721753, 1025364]
+processed_samples 8600 unjoint_samples 8600 joint_samples 24 [853248, 1047593]
+processed_samples 8600 unjoint_samples 8600 joint_samples 24 [1046454, 1016346]
+processed_samples 8700 unjoint_samples 8700 joint_samples 26 [451035, 1046288]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1045922, 62693]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1046454, 228407]
+processed_samples 8700 unjoint_samples 8700 joint_samples 24 [1046919, 530291]
+processed_samples 8700 unjoint_samples 8700 joint_samples 26 [494244, 1045603]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [747180, 1027907]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1046791, 783968]
+processed_samples 8700 unjoint_samples 8700 joint_samples 25 [1002229, 1025364]
+processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1046791, 22409]
+processed_samples 8800 unjoint_samples 8800 joint_samples 25 [1045922, 488362]
+processed_samples 8800 unjoint_samples 8800 joint_samples 26 [795260, 1046288]
+processed_samples 8800 unjoint_samples 8800 joint_samples 25 [1046454, 526828]
+processed_samples 8800 unjoint_samples 8800 joint_samples 26 [357063, 1032193]
+processed_samples 8800 unjoint_samples 8800 joint_samples 25 [1039157, 1035439]
+processed_samples 8800 unjoint_samples 8800 joint_samples 24 [1046919, 805591]
+processed_samples 8800 unjoint_samples 8800 joint_samples 26 [1002117, 1045603]
[repeated h264 decoder warnings: mmco: unref short failure]
+processed_samples 8900 unjoint_samples 8900 joint_samples 26 [249066, 1047879]
+processed_samples 8900 unjoint_samples 8900 joint_samples 26 [249066, 1047879]
+processed_samples 8900 unjoint_samples 8900 joint_samples 27 [265032, 1045603]
+processed_samples 8900 unjoint_samples 8900 joint_samples 27 [265032, 1045603]
+processed_samples 8900 unjoint_samples 8900 joint_samples 25 [158277, 1042212]
+processed_samples 8900 unjoint_samples 8900 joint_samples 25 [158277, 1042212]
+processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1046791, 432390]
+processed_samples 8900 unjoint_samples 8900 joint_samples 26 [1046791, 432390]
+processed_samples 8900 unjoint_samples 8900 joint_samples 25 [1045922, 842549]
+processed_samples 8900 unjoint_samples 8900 joint_samples 25 [1045922, 842549]
+processed_samples 8900 unjoint_samples 8900 joint_samples 27 [1027048, 135264]
+processed_samples 8900 unjoint_samples 8900 joint_samples 27 [1027048, 135264]
+processed_samples 8900 unjoint_samples 8900 joint_samples 25 [1046454, 863913]
+processed_samples 8900 unjoint_samples 8900 joint_samples 25 [1046454, 863913]
+processed_samples 8900 unjoint_samples 8900 joint_samples 26 [738698, 1032193]
+processed_samples 8900 unjoint_samples 8900 joint_samples 26 [738698, 1032193]
[repeated h264 decoder warnings: mmco: unref short failure]
+processed_samples 9000 unjoint_samples 9000 joint_samples 26 [123938, 1042030]
+processed_samples 9000 unjoint_samples 9000 joint_samples 27 [1047643, 64213]
+processed_samples 9000 unjoint_samples 9000 joint_samples 27 [1047643, 64213]
+processed_samples 9000 unjoint_samples 9000 joint_samples 26 [123938, 1042030]
+processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1046454, 113007]
+processed_samples 9000 unjoint_samples 9000 joint_samples 25 [458153, 1042212]
+processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1046454, 113007]
+processed_samples 9000 unjoint_samples 9000 joint_samples 25 [458153, 1042212]
+processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1046791, 742527]
+processed_samples 9000 unjoint_samples 9000 joint_samples 26 [1046791, 742527]
+processed_samples 9000 unjoint_samples 9000 joint_samples 27 [529711, 1045603]
+processed_samples 9000 unjoint_samples 9000 joint_samples 27 [529711, 1045603]
+processed_samples 9000 unjoint_samples 9000 joint_samples 27 [1027048, 403797]
+processed_samples 9000 unjoint_samples 9000 joint_samples 26 [515003, 1047879]
+processed_samples 9000 unjoint_samples 9000 joint_samples 27 [1027048, 403797]
+processed_samples 9000 unjoint_samples 9000 joint_samples 26 [515003, 1047879]
[repeated h264 decoder warnings: mmco: unref short failure]
+processed_samples 9100 unjoint_samples 9100 joint_samples 26 [1046454, 358991]
+processed_samples 9100 unjoint_samples 9100 joint_samples 26 [1046454, 358991]
+processed_samples 9100 unjoint_samples 9100 joint_samples 27 [875320, 1045603]
+processed_samples 9100 unjoint_samples 9100 joint_samples 27 [875320, 1045603]
+processed_samples 9100 unjoint_samples 9100 joint_samples 27 [89890, 1006389]
+processed_samples 9100 unjoint_samples 9100 joint_samples 27 [89890, 1006389]
+processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1047643, 358419]
+processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1047643, 358419]
+processed_samples 9100 unjoint_samples 9100 joint_samples 26 [742902, 1047879]
+processed_samples 9100 unjoint_samples 9100 joint_samples 26 [742902, 1047879]
+processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1027048, 755207]
+processed_samples 9100 unjoint_samples 9100 joint_samples 27 [1027048, 755207]
+processed_samples 9100 unjoint_samples 9100 joint_samples 25 [739022, 1042212]
+processed_samples 9100 unjoint_samples 9100 joint_samples 26 [354799, 1042030]
+processed_samples 9100 unjoint_samples 9100 joint_samples 25 [739022, 1042212]
+processed_samples 9100 unjoint_samples 9100 joint_samples 26 [354799, 1042030]
[repeated h264 decoder warnings: mmco: unref short failure]
+processed_samples 9200 unjoint_samples 9200 joint_samples 28 [1038775, 97059]
+processed_samples 9200 unjoint_samples 9200 joint_samples 28 [1038775, 97059]
+processed_samples 9200 unjoint_samples 9200 joint_samples 26 [1046454, 695739]
+processed_samples 9200 unjoint_samples 9200 joint_samples 26 [1046454, 695739]
+processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1047745, 6863]
+processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1047745, 6863]
+processed_samples 9200 unjoint_samples 9200 joint_samples 28 [1038281, 150040]
+processed_samples 9200 unjoint_samples 9200 joint_samples 28 [1038281, 150040]
+processed_samples 9200 unjoint_samples 9200 joint_samples 27 [398709, 1006389]
+processed_samples 9200 unjoint_samples 9200 joint_samples 27 [398709, 1006389]
+processed_samples 9200 unjoint_samples 9200 joint_samples 26 [629994, 1042030]
+processed_samples 9200 unjoint_samples 9200 joint_samples 26 [629994, 1042030]
+processed_samples 9200 unjoint_samples 9200 joint_samples 25 [1005966, 1042212]
+processed_samples 9200 unjoint_samples 9200 joint_samples 25 [1005966, 1042212]
+processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1047643, 750496]
+processed_samples 9200 unjoint_samples 9200 joint_samples 27 [1047643, 750496]
[repeated h264 decoder warnings: mmco: unref short failure]
+processed_samples 9300 unjoint_samples 9300 joint_samples 28 [1038775, 364135]
+processed_samples 9300 unjoint_samples 9300 joint_samples 28 [1038775, 364135]
+processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1047745, 336179]
+processed_samples 9300 unjoint_samples 9300 joint_samples 27 [652419, 1006389]
+processed_samples 9300 unjoint_samples 9300 joint_samples 27 [652419, 1006389]
+processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1047745, 336179]
+processed_samples 9300 unjoint_samples 9300 joint_samples 28 [1038281, 507218]
+processed_samples 9300 unjoint_samples 9300 joint_samples 26 [1047585, 298013]
+processed_samples 9300 unjoint_samples 9300 joint_samples 28 [1038281, 507218]
+processed_samples 9300 unjoint_samples 9300 joint_samples 26 [1047585, 298013]
+processed_samples 9300 unjoint_samples 9300 joint_samples 26 [1002242, 1042030]
+processed_samples 9300 unjoint_samples 9300 joint_samples 26 [1002242, 1042030]
+processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1047643, 1006004]
+processed_samples 9300 unjoint_samples 9300 joint_samples 27 [1047643, 1006004]
+processed_samples 9300 unjoint_samples 9300 joint_samples 26 [1046454, 954360]
+processed_samples 9300 unjoint_samples 9300 joint_samples 26 [1046454, 954360]
[repeated h264 decoder warnings: mmco: unref short failure]
+processed_samples 9400 unjoint_samples 9400 joint_samples 27 [306612, 1045583]
+processed_samples 9400 unjoint_samples 9400 joint_samples 27 [306612, 1045583]
+processed_samples 9400 unjoint_samples 9400 joint_samples 27 [251334, 1046690]
+processed_samples 9400 unjoint_samples 9400 joint_samples 27 [251334, 1046690]
+processed_samples 9400 unjoint_samples 9400 joint_samples 26 [1047585, 599668]
+processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1047745, 641869]
+processed_samples 9400 unjoint_samples 9400 joint_samples 26 [1047585, 599668]
+processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1047745, 641869]
+processed_samples 9400 unjoint_samples 9400 joint_samples 28 [1038775, 608004]
+processed_samples 9400 unjoint_samples 9400 joint_samples 28 [1038775, 608004]
+processed_samples 9400 unjoint_samples 9400 joint_samples 28 [1047643, 282514]
+processed_samples 9400 unjoint_samples 9400 joint_samples 28 [1047643, 282514]
+processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1008695, 1010332]
+processed_samples 9400 unjoint_samples 9400 joint_samples 27 [1008695, 1010332]
+processed_samples 9400 unjoint_samples 9400 joint_samples 28 [1038281, 834788]
+processed_samples 9400 unjoint_samples 9400 joint_samples 28 [1038281, 834788]
[repeated h264 decoder warnings: mmco: unref short failure]
+processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1042818, 1042754]
+processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1042818, 1042754]
+processed_samples 9500 unjoint_samples 9500 joint_samples 29 [144358, 1040076]
+processed_samples 9500 unjoint_samples 9500 joint_samples 29 [144358, 1040076]
+processed_samples 9500 unjoint_samples 9500 joint_samples 28 [223943, 1043423]
+processed_samples 9500 unjoint_samples 9500 joint_samples 28 [223943, 1043423]
+processed_samples 9500 unjoint_samples 9500 joint_samples 27 [543928, 1046690]
+processed_samples 9500 unjoint_samples 9500 joint_samples 27 [543928, 1046690]
+processed_samples 9500 unjoint_samples 9500 joint_samples 27 [718613, 1045583]
+processed_samples 9500 unjoint_samples 9500 joint_samples 27 [718613, 1045583]
+processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1047643, 536255]
+processed_samples 9500 unjoint_samples 9500 joint_samples 28 [1047643, 536255]
+processed_samples 9500 unjoint_samples 9500 joint_samples 26 [1047585, 874255]
+processed_samples 9500 unjoint_samples 9500 joint_samples 26 [1047585, 874255]
+processed_samples 9500 unjoint_samples 9500 joint_samples 27 [1047745, 944718]
+processed_samples 9500 unjoint_samples 9500 joint_samples 27 [1047745, 944718]
[repeated h264 decoder warnings: mmco: unref short failure]
+processed_samples 9600 unjoint_samples 9600 joint_samples 27 [204003, 1047296]
+processed_samples 9600 unjoint_samples 9600 joint_samples 27 [204003, 1047296]
+processed_samples 9600 unjoint_samples 9600 joint_samples 29 [272993, 1046965]
+processed_samples 9600 unjoint_samples 9600 joint_samples 29 [272993, 1046965]
+processed_samples 9600 unjoint_samples 9600 joint_samples 28 [465421, 1043423]
+processed_samples 9600 unjoint_samples 9600 joint_samples 28 [465421, 1043423]
+processed_samples 9600 unjoint_samples 9600 joint_samples 27 [1018934, 1045583]
+processed_samples 9600 unjoint_samples 9600 joint_samples 27 [1018934, 1045583]
+processed_samples 9600 unjoint_samples 9600 joint_samples 28 [310942, 1043488]
+processed_samples 9600 unjoint_samples 9600 joint_samples 28 [310942, 1043488]
+processed_samples 9600 unjoint_samples 9600 joint_samples 27 [920884, 1046690]
+processed_samples 9600 unjoint_samples 9600 joint_samples 27 [920884, 1046690]
+processed_samples 9600 unjoint_samples 9600 joint_samples 29 [431124, 1040076]
+processed_samples 9600 unjoint_samples 9600 joint_samples 29 [431124, 1040076]
+processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1047643, 892033]
+processed_samples 9600 unjoint_samples 9600 joint_samples 28 [1047643, 892033]
[repeated h264 decoder warnings: mmco: unref short failure]
+processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1026473, 354438]
+processed_samples 9700 unjoint_samples 9700 joint_samples 28 [1026473, 354438]
+processed_samples 9700 unjoint_samples 9700 joint_samples 27 [660827, 1047296]
+processed_samples 9700 unjoint_samples 9700 joint_samples 27 [660827, 1047296]
+processed_samples 9700 unjoint_samples 9700 joint_samples 28 [160973, 1047013]
+processed_samples 9700 unjoint_samples 9700 joint_samples 28 [160973, 1047013]
+processed_samples 9700 unjoint_samples 9700 joint_samples 29 [99375, 1047294]
+processed_samples 9700 unjoint_samples 9700 joint_samples 29 [504566, 1046965]
+processed_samples 9700 unjoint_samples 9700 joint_samples 29 [99375, 1047294]
+processed_samples 9700 unjoint_samples 9700 joint_samples 29 [504566, 1046965]
+processed_samples 9700 unjoint_samples 9700 joint_samples 29 [723427, 1040076]
+processed_samples 9700 unjoint_samples 9700 joint_samples 29 [723427, 1040076]
+processed_samples 9700 unjoint_samples 9700 joint_samples 28 [757690, 1043423]
+processed_samples 9700 unjoint_samples 9700 joint_samples 28 [757690, 1043423]
+processed_samples 9700 unjoint_samples 9700 joint_samples 28 [685390, 1043488]
+processed_samples 9700 unjoint_samples 9700 joint_samples 28 [685390, 1043488]
[repeated h264 decoder warnings: mmco: unref short failure]
+processed_samples 9800 unjoint_samples 9800 joint_samples 28 [1026473, 659070]
+processed_samples 9800 unjoint_samples 9800 joint_samples 27 [997472, 1047296]
+processed_samples 9800 unjoint_samples 9800 joint_samples 29 [792287, 1046965]
+processed_samples 9800 unjoint_samples 9800 joint_samples 28 [540510, 1047013]
+processed_samples 9800 unjoint_samples 9800 joint_samples 29 [356560, 1047294]
+processed_samples 9800 unjoint_samples 9800 joint_samples 28 [949488, 1043488]
+processed_samples 9800 unjoint_samples 9800 joint_samples 29 [984212, 1040076]
+processed_samples 9800 unjoint_samples 9800 joint_samples 28 [1026473, 659070]
+processed_samples 9800 unjoint_samples 9800 joint_samples 27 [997472, 1047296]
+processed_samples 9800 unjoint_samples 9800 joint_samples 28 [1035407, 1043423]
+processed_samples 9800 unjoint_samples 9800 joint_samples 28 [540510, 1047013]
+processed_samples 9800 unjoint_samples 9800 joint_samples 29 [356560, 1047294]
+processed_samples 9800 unjoint_samples 9800 joint_samples 29 [792287, 1046965]
+processed_samples 9800 unjoint_samples 9800 joint_samples 28 [949488, 1043488]
+processed_samples 9800 unjoint_samples 9800 joint_samples 29 [984212, 1040076]
+processed_samples 9800 unjoint_samples 9800 joint_samples 28 [1035407, 1043423]
[repeated h264 decoder warnings: mmco: unref short failure]
+processed_samples 9900 unjoint_samples 9900 joint_samples 28 [819754, 1047013]
+processed_samples 9900 unjoint_samples 9900 joint_samples 28 [819754, 1047013]
+processed_samples 9900 unjoint_samples 9900 joint_samples 29 [242930, 1046769]
+processed_samples 9900 unjoint_samples 9900 joint_samples 29 [242930, 1046769]
+processed_samples 9900 unjoint_samples 9900 joint_samples 29 [344955, 1047964]
+processed_samples 9900 unjoint_samples 9900 joint_samples 29 [344955, 1047964]
+processed_samples 9900 unjoint_samples 9900 joint_samples 28 [321534, 1047296]
+processed_samples 9900 unjoint_samples 9900 joint_samples 28 [321534, 1047296]
+processed_samples 9900 unjoint_samples 9900 joint_samples 30 [1035801, 230677]
+processed_samples 9900 unjoint_samples 9900 joint_samples 30 [1035801, 230677]
+processed_samples 9900 unjoint_samples 9900 joint_samples 29 [655848, 1047294]
+processed_samples 9900 unjoint_samples 9900 joint_samples 29 [655848, 1047294]
+processed_samples 9900 unjoint_samples 9900 joint_samples 28 [1026473, 941823]
+processed_samples 9900 unjoint_samples 9900 joint_samples 28 [1026473, 941823]
+processed_samples 9900 unjoint_samples 9900 joint_samples 30 [1041523, 14572]
+processed_samples 9900 unjoint_samples 9900 joint_samples 30 [1041523, 14572]
+[h264 @ 0x55eed3a8aa40] mmco: unref short failure
+processed_samples 10000 unjoint_samples 10000 joint_samples 29 [649276, 1047964]
+processed_samples 10000 unjoint_samples 10000 joint_samples 29 [150504, 1048063]
+processed_samples 10000 unjoint_samples 10000 joint_samples 30 [1041523, 391481]
+processed_samples 10000 unjoint_samples 10000 joint_samples 29 [79391, 1047013]
+processed_samples 10000 unjoint_samples 10000 joint_samples 30 [1035801, 474146]
+processed_samples 10000 unjoint_samples 10000 joint_samples 28 [637236, 1047296]
+processed_samples 10000 unjoint_samples 10000 joint_samples 29 [604392, 1046769]
+processed_samples 10000 unjoint_samples 10000 joint_samples 29 [982666, 1047294]
+[h264 @ 0x563876420f40] mmco: unref short failure
+processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1041523, 662533]
+processed_samples 10100 unjoint_samples 10100 joint_samples 28 [965460, 1047296]
+processed_samples 10100 unjoint_samples 10100 joint_samples 29 [358324, 1047013]
+processed_samples 10100 unjoint_samples 10100 joint_samples 29 [418387, 1048063]
+processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1024549, 303112]
+processed_samples 10100 unjoint_samples 10100 joint_samples 29 [973894, 1046769]
+processed_samples 10100 unjoint_samples 10100 joint_samples 30 [1035801, 754242]
+processed_samples 10100 unjoint_samples 10100 joint_samples 29 [977577, 1047964]
+[h264 @ 0x55eed7bb3200] mmco: unref short failure
+processed_samples 10200 unjoint_samples 10200 joint_samples 29 [226508, 1048015]
+processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1047300, 305920]
+processed_samples 10200 unjoint_samples 10200 joint_samples 30 [214802, 1046769]
+processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1041523, 961666]
+processed_samples 10200 unjoint_samples 10200 joint_samples 29 [568277, 1047013]
+processed_samples 10200 unjoint_samples 10200 joint_samples 29 [713438, 1048063]
+processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1024549, 601154]
+processed_samples 10200 unjoint_samples 10200 joint_samples 30 [1035801, 1035003]
+[h264 @ 0x55eed77a4340] mmco: unref short failure
+[h264 @ 0x563875231d40] mmco: unref short failure
+processed_samples 10300 unjoint_samples 10300 joint_samples 30 [31935, 1048063]
+processed_samples 10300 unjoint_samples 10300 joint_samples 31 [1046111, 229617]
+processed_samples 10300 unjoint_samples 10300 joint_samples 31 [1047379, 177439]
+processed_samples 10300 unjoint_samples 10300 joint_samples 30 [1047300, 553564]
+processed_samples 10300 unjoint_samples 10300 joint_samples 30 [529983, 1046769]
+processed_samples 10300 unjoint_samples 10300 joint_samples 29 [550900, 1048015]
+processed_samples 10300 unjoint_samples 10300 joint_samples 29 [819512, 1047013]
+processed_samples 10300 unjoint_samples 10300 joint_samples 30 [1024549, 975772]
+[h264 @ 0x5638746f4540] mmco: unref short failure
+[h264 @ 0x563875e2d840] mmco: unref short failure
+processed_samples 10400 unjoint_samples 10400 joint_samples 29 [808089, 1048015]
+processed_samples 10400 unjoint_samples 10400 joint_samples 30 [364475, 1048063]
+processed_samples 10400 unjoint_samples 10400 joint_samples 30 [1046777, 126893]
+processed_samples 10400 unjoint_samples 10400 joint_samples 31 [1047379, 475264]
+processed_samples 10400 unjoint_samples 10400 joint_samples 31 [234503, 1043482]
+processed_samples 10400 unjoint_samples 10400 joint_samples 31 [1046111, 568118]
+processed_samples 10400 unjoint_samples 10400 joint_samples 30 [1047300, 888797]
+processed_samples 10400 unjoint_samples 10400 joint_samples 30 [821075, 1046769]
+[h264 @ 0x55eed3027c40] mmco: unref short failure
+processed_samples 10500 unjoint_samples 10500 joint_samples 31 [1046111, 992185]
+processed_samples 10500 unjoint_samples 10500 joint_samples 30 [1030943, 121060]
+processed_samples 10500 unjoint_samples 10500 joint_samples 31 [97382, 1046769]
+processed_samples 10500 unjoint_samples 10500 joint_samples 31 [513455, 1043482]
+processed_samples 10500 unjoint_samples 10500 joint_samples 30 [1046777, 426111]
+processed_samples 10500 unjoint_samples 10500 joint_samples 30 [702187, 1048063]
+processed_samples 10500 unjoint_samples 10500 joint_samples 31 [1047300, 298088]
+processed_samples 10500 unjoint_samples 10500 joint_samples 31 [1047379, 707992]
+[h264 @ 0x55eed425b900] mmco: unref short failure
+[h264 @ 0x5638786aa400] Missing reference picture, default is 65530
+[h264 @ 0x55eed3717500] Missing reference picture, default is 65530
+processed_samples 10600 unjoint_samples 10600 joint_samples 32 [372248, 1020567]
+processed_samples 10600 unjoint_samples 10600 joint_samples 31 [1047300, 613751]
+processed_samples 10600 unjoint_samples 10600 joint_samples 31 [744143, 1043482]
+processed_samples 10600 unjoint_samples 10600 joint_samples 30 [1046777, 869061]
+processed_samples 10600 unjoint_samples 10600 joint_samples 31 [408098, 1046769]
+processed_samples 10600 unjoint_samples 10600 joint_samples 30 [1030943, 372965]
+processed_samples 10600 unjoint_samples 10600 joint_samples 31 [1047379, 1025515]
+processed_samples 10600 unjoint_samples 10600 joint_samples 30 [980074, 1048063]
+[h264 @ 0x55eed36c4540] mmco: unref short failure
+processed_samples 10700 unjoint_samples 10700 joint_samples 30 [1030943, 917760]
+processed_samples 10700 unjoint_samples 10700 joint_samples 31 [38137, 1046132]
+processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1045150, 356161]
+processed_samples 10700 unjoint_samples 10700 joint_samples 32 [1023789, 135557]
+processed_samples 10700 unjoint_samples 10700 joint_samples 31 [785948, 1046769]
+processed_samples 10700 unjoint_samples 10700 joint_samples 32 [820550, 1020567]
+processed_samples 10700 unjoint_samples 10700 joint_samples 32 [330403, 1038409]
+processed_samples 10700 unjoint_samples 10700 joint_samples 31 [1047300, 917663]
+[h264 @ 0x563875db3980] mmco: unref short failure
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1047059, 59519]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1047300, 218949]
+processed_samples 10800 unjoint_samples 10800 joint_samples 31 [1035342, 332958]
+processed_samples 10800 unjoint_samples 10800 joint_samples 33 [1046098, 72095]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [1023789, 541614]
+processed_samples 10800 unjoint_samples 10800 joint_samples 31 [1045150, 796359]
+processed_samples 10800 unjoint_samples 10800 joint_samples 31 [364206, 1046132]
+processed_samples 10800 unjoint_samples 10800 joint_samples 32 [590914, 1038409]
+[h264 @ 0x563878d42fc0] mmco: unref short failure
+processed_samples 10900 unjoint_samples 10900 joint_samples 32 [56358, 1027924]
+processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1047300, 584738]
+processed_samples 10900 unjoint_samples 10900 joint_samples 33 [1046098, 389775]
+processed_samples 10900 unjoint_samples 10900 joint_samples 32 [998793, 1038409]
+processed_samples 10900 unjoint_samples 10900 joint_samples 31 [673796, 1046132]
+processed_samples 10900 unjoint_samples 10900 joint_samples 31 [1035342, 637309]
+processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1047059, 380476]
+processed_samples 10900 unjoint_samples 10900 joint_samples 32 [1023789, 822808]
+[h264 @ 0x5638785a6400] mmco: unref short failure
+processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1047383, 320009]
+processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1047059, 681044]
+processed_samples 11000 unjoint_samples 11000 joint_samples 32 [423471, 1027924]
+processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1047300, 966840]
+processed_samples 11000 unjoint_samples 11000 joint_samples 31 [1035342, 945699]
+processed_samples 11000 unjoint_samples 11000 joint_samples 33 [1046098, 674781]
+processed_samples 11000 unjoint_samples 11000 joint_samples 32 [1039351, 1040069]
+processed_samples 11000 unjoint_samples 11000 joint_samples 31 [1026786, 1046132]
+[h264 @ 0x55eed7ecc180] mmco: unref short failure
+[h264 @ 0x563875db3980] mmco: unref
short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x56387d684a80] mmco: unref short failure +[h264 @ 0x56387d684a80] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +processed_samples 11100 unjoint_samples 11100 joint_samples 32 [129580, 1046777] +processed_samples 11100 unjoint_samples 11100 joint_samples 32 [129580, 1046777] +[h264 @ 0x5638787dbdc0] mmco: unref short failure +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [9486, 1046863] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [9486, 1046863] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1047300, 188591] +processed_samples 11100 unjoint_samples 11100 joint_samples 32 [288879, 1046132] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1047300, 188591] +processed_samples 11100 unjoint_samples 11100 joint_samples 32 [288879, 1046132] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [226160, 1047678] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [226160, 1047678] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1047383, 685881] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1047383, 685881] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1046098, 1011698] +processed_samples 11100 unjoint_samples 11100 joint_samples 33 [1046098, 1011698] +processed_samples 11100 unjoint_samples 11100 joint_samples 32 [706407, 1027924] 
+processed_samples 11100 unjoint_samples 11100 joint_samples 32 [706407, 1027924] +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x5638753c58c0] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x56387483a1c0] mmco: unref short failure +[h264 @ 0x56387483a1c0] mmco: unref short failure +[h264 @ 0x56387483a1c0] mmco: unref short failure +[h264 @ 0x56387483a1c0] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563874943900] mmco: unref short failure +[h264 @ 0x563874943900] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +processed_samples 11200 unjoint_samples 11200 joint_samples 34 [6994, 1047485] +processed_samples 11200 unjoint_samples 11200 joint_samples 34 [6994, 1047485] +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short 
failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +processed_samples 11200 unjoint_samples 11200 joint_samples 32 [502256, 1046777] +processed_samples 11200 unjoint_samples 11200 joint_samples 32 [502256, 1046777] +processed_samples 11200 unjoint_samples 11200 joint_samples 34 [281271, 1046027] +processed_samples 11200 unjoint_samples 11200 joint_samples 34 [281271, 1046027] +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [313263, 1046863] +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [313263, 1046863] +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [504566, 1047678] +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [504566, 1047678] +processed_samples 11200 unjoint_samples 11200 joint_samples 32 [545664, 1046132] +processed_samples 11200 unjoint_samples 11200 joint_samples 32 [545664, 1046132] +processed_samples 11200 unjoint_samples 11200 joint_samples 32 [977606, 1027924] +processed_samples 11200 unjoint_samples 11200 joint_samples 32 [977606, 1027924] +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [1047300, 524270] +processed_samples 11200 unjoint_samples 11200 joint_samples 33 [1047300, 524270] +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure 
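The run of records above interleaves two things: h264 decoder warnings from ffmpeg ("mmco: unref short failure", occasionally "Missing reference picture"), presumably emitted while video clips are decoded for the dataloader, and progress counters of the form "processed_samples N unjoint_samples N joint_samples M [a, b]", which appear in duplicate, apparently because more than one rank prints them. A minimal stdlib-only sketch for summarizing such a log follows; the file name, the helper's name, and the treatment of the bracketed numbers are illustrative assumptions, not taken from the training code.

import re
from collections import Counter

# Hypothetical helper for digesting a node log like this one.
# Assumptions: the log path is illustrative, and only the two record
# types visible above are parsed (h264 decoder warnings and the
# processed_samples progress lines). The meaning of the two bracketed
# numbers is not documented in the log, so they are kept as-is.

WARN_RE = re.compile(r"\[h264 @ 0x[0-9a-f]+\] (.+)")
PROG_RE = re.compile(
    r"processed_samples (\d+) unjoint_samples (\d+) "
    r"joint_samples (\d+) \[(\d+), (\d+)\]"
)

def summarize(log_path="log_node13.txt"):
    warnings = Counter()   # warning message -> occurrence count
    progress = {}          # processed_samples -> list of (joint_samples, a, b)
    with open(log_path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            m = WARN_RE.search(line)
            if m:
                warnings[m.group(1).strip()] += 1
                continue
            m = PROG_RE.search(line)
            if m:
                n = int(m.group(1))
                progress.setdefault(n, []).append(
                    (int(m.group(3)), int(m.group(4)), int(m.group(5)))
                )
    return warnings, progress

if __name__ == "__main__":
    warnings, progress = summarize()
    for msg, count in warnings.most_common(5):
        print(f"{count:8d}  {msg}")
    if progress:
        latest = max(progress)
        print(f"latest checkpoint: processed_samples={latest}, "
              f"{len(progress[latest])} records (duplicates included)")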
+[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed4c79700] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed3a68300] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [143033, 1046769] +processed_samples 11300 unjoint_samples 11300 joint_samples 34 [328484, 1047485] +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [143033, 1046769] +processed_samples 11300 unjoint_samples 11300 joint_samples 32 [869419, 1046132] +processed_samples 11300 unjoint_samples 11300 joint_samples 34 [328484, 1047485] +processed_samples 11300 unjoint_samples 11300 joint_samples 32 [790404, 1046777] 
+processed_samples 11300 unjoint_samples 11300 joint_samples 32 [790404, 1046777] +processed_samples 11300 unjoint_samples 11300 joint_samples 32 [869419, 1046132] +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [1047300, 761387] +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [789726, 1047678] +processed_samples 11300 unjoint_samples 11300 joint_samples 34 [585902, 1046027] +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [1047300, 761387] +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [789726, 1047678] +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [562925, 1046863] +processed_samples 11300 unjoint_samples 11300 joint_samples 34 [585902, 1046027] +processed_samples 11300 unjoint_samples 11300 joint_samples 33 [562925, 1046863] +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: 
unref short failure +[h264 @ 0x55eed507ad40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [1044677, 26282] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [1044677, 26282] +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [1032585, 33141] +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [1032585, 33141] +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [179411, 1046878] +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [179411, 1046878] +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [415137, 1046769] +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [415137, 1046769] +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [821744, 1046863] +processed_samples 11400 unjoint_samples 11400 joint_samples 33 [821744, 1046863] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [888906, 1046027] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [888906, 1046027] +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [21712, 1027612] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [21712, 1027612] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [692923, 1047485] +processed_samples 11400 unjoint_samples 11400 joint_samples 34 [692923, 1047485] +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x55eed3a68300] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x55eed3a68300] mmco: unref short failure +[h264 @ 0x55eed3a68300] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x55eed4750840] mmco: 
unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed3bc9740] mmco: unref short failure +[h264 @ 0x55eed3bc9740] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed3bc9740] mmco: unref short failure +[h264 @ 0x55eed3bc9740] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +processed_samples 11500 unjoint_samples 11500 joint_samples 33 [1032585, 309772] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [84586, 1046863] +processed_samples 11500 unjoint_samples 11500 joint_samples 35 [144908, 1046923] +processed_samples 11500 unjoint_samples 11500 joint_samples 33 [1032585, 309772] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [84586, 1046863] +processed_samples 11500 unjoint_samples 11500 joint_samples 35 [144908, 1046923] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1044677, 365148] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [435165, 1027612] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [435165, 1027612] +processed_samples 11500 unjoint_samples 11500 joint_samples 33 [532127, 1046878] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1044677, 365148] +processed_samples 11500 unjoint_samples 11500 joint_samples 33 [532127, 1046878] +processed_samples 11500 unjoint_samples 11500 joint_samples 33 [618439, 1046769] +processed_samples 
11500 unjoint_samples 11500 joint_samples 33 [618439, 1046769] +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1029614, 1047485] +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +processed_samples 11500 unjoint_samples 11500 joint_samples 34 [1029614, 1047485] +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 
0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +processed_samples 11600 unjoint_samples 11600 joint_samples 33 [939295, 1046769] +processed_samples 11600 unjoint_samples 11600 joint_samples 35 [1047829, 308479] +processed_samples 11600 unjoint_samples 11600 joint_samples 33 [1032585, 568660] +processed_samples 11600 unjoint_samples 11600 joint_samples 33 [921125, 1046878] +processed_samples 11600 unjoint_samples 11600 joint_samples 33 [939295, 1046769] +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1044677, 664610] +processed_samples 11600 unjoint_samples 11600 joint_samples 35 [1047829, 308479] +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [383585, 1046863] +processed_samples 11600 unjoint_samples 11600 joint_samples 35 [386120, 1046923] +processed_samples 11600 unjoint_samples 11600 joint_samples 33 [1032585, 568660] +processed_samples 11600 unjoint_samples 11600 joint_samples 33 [921125, 1046878] +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [800188, 1027612] +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [383585, 1046863] +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [1044677, 664610] +processed_samples 11600 unjoint_samples 11600 joint_samples 35 [386120, 1046923] +processed_samples 11600 unjoint_samples 11600 joint_samples 34 [800188, 1027612] +[h264 
@ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563874c4b200] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4750840] mmco: unref short failure +[h264 @ 0x55eed4750840] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed4750840] mmco: unref short failure +[h264 @ 0x55eed4750840] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short 
failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x56387b3eca00] mmco: unref short failure +[h264 @ 0x56387b3eca00] mmco: unref short failure +[h264 @ 0x56387b3eca00] mmco: unref short failure +[h264 @ 0x56387b3eca00] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eedc5533c0] mmco: unref short failure +[h264 @ 0x55eedc5533c0] mmco: unref short failure +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [1047829, 670877] +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [191825, 1046878] +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [766142, 1046863] +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [1047206, 66668] +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [286262, 1047118] +processed_samples 11700 unjoint_samples 11700 joint_samples 33 [1032585, 939330] +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [1044677, 1034840] +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [675361, 1046923] +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [1047829, 670877] +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [191825, 1046878] +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [766142, 1046863] +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [1047206, 66668] +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [286262, 1047118] +processed_samples 11700 unjoint_samples 11700 joint_samples 34 [1044677, 1034840] +processed_samples 11700 unjoint_samples 11700 joint_samples 33 [1032585, 939330] +processed_samples 11700 unjoint_samples 11700 joint_samples 35 [675361, 1046923] +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref 
short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x5638751ede00] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x5638751ede00] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [11480, 1046897] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [11480, 1046897] +processed_samples 11800 unjoint_samples 11800 joint_samples 34 
[169475, 1046595] +processed_samples 11800 unjoint_samples 11800 joint_samples 34 [169475, 1046595] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1046929, 333062] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1047206, 402160] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1046929, 333062] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1047206, 402160] +processed_samples 11800 unjoint_samples 11800 joint_samples 34 [576137, 1047118] +processed_samples 11800 unjoint_samples 11800 joint_samples 34 [626819, 1046878] +processed_samples 11800 unjoint_samples 11800 joint_samples 34 [626819, 1046878] +processed_samples 11800 unjoint_samples 11800 joint_samples 34 [576137, 1047118] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1003604, 1046923] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1047829, 1033441] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1003604, 1046923] +processed_samples 11800 unjoint_samples 11800 joint_samples 35 [1047829, 1033441] +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed3027c40] Missing reference picture, default is 65530 +[h264 @ 0x55eed3027c40] Missing reference picture, default is 65530 +[h264 @ 0x563875db3980] Missing reference picture, default is 65530 +[h264 @ 0x563875db3980] Missing reference picture, default is 65530 +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3027c40] Missing reference picture, default is 65530 +[h264 @ 0x563875db3980] [h264 @ 0x55eed3027c40] Missing reference picture, default is 65530 +Missing reference picture, default is 65530 +[h264 @ 0x563875db3980] Missing reference picture, default is 65530 +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] 
mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875069800] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x5638753c58c0] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x5638753c58c0] mmco: unref short failure +[h264 @ 0x5638753c58c0] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed77a6380] mmco: unref short failure +[h264 @ 0x55eed77a6380] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x56386988a480] mmco: unref short failure +[h264 @ 0x56386988a480] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875231d40] [h264 @ 0x55eed310b440] mmco: unref short failure +mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 
0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +processed_samples 11900 unjoint_samples 11900 joint_samples 36 [312423, 1046544] +processed_samples 11900 unjoint_samples 11900 joint_samples 36 [312423, 1046544] +processed_samples 11900 unjoint_samples 11900 joint_samples 34 [879798, 1047118] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [358647, 1046897] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [358647, 1046897] +processed_samples 11900 unjoint_samples 11900 joint_samples 34 [879798, 1047118] +processed_samples 11900 unjoint_samples 11900 joint_samples 36 [1031376, 359414] +processed_samples 11900 unjoint_samples 11900 joint_samples 36 [1031376, 359414] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1046929, 699922] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1046929, 699922] +processed_samples 11900 unjoint_samples 11900 joint_samples 34 [514921, 1046595] +processed_samples 11900 unjoint_samples 11900 joint_samples 34 [514921, 1046595] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1047206, 649618] +processed_samples 11900 unjoint_samples 11900 joint_samples 35 [1047206, 649618] +processed_samples 11900 unjoint_samples 11900 joint_samples 34 [955842, 1046878] +processed_samples 11900 unjoint_samples 11900 joint_samples 34 [955842, 1046878] +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eedb863d80] mmco: unref short failure +[h264 
@ 0x55eedb863d80] mmco: unref short failure
+[h264 @ ...] mmco: unref short failure   (warning emitted repeatedly by multiple h264 decoder instances)
+processed_samples 12000 unjoint_samples 12000 joint_samples 35 [1040114, 150851]
+processed_samples 12000 unjoint_samples 12000 joint_samples 35 [1046067, 232441]
+processed_samples 12000 unjoint_samples 12000 joint_samples 34 [804566, 1046595]
+processed_samples 12000 unjoint_samples 12000 joint_samples 35 [706641, 1046897]
+processed_samples 12000 unjoint_samples 12000 joint_samples 36 [668690, 1046544]
+processed_samples 12000 unjoint_samples 12000 joint_samples 35 [1046929, 993014]
+processed_samples 12000 unjoint_samples 12000 joint_samples 36 [1031376, 925919]
+processed_samples 12000 unjoint_samples 12000 joint_samples 35 [1047206, 923776]
+[h264 @ ...] mmco: unref short failure   (repeated)
+processed_samples 12100 unjoint_samples 12100 joint_samples 37 [1046735, 63068]
+processed_samples 12100 unjoint_samples 12100 joint_samples 36 [28778, 1046897]
+processed_samples 12100 unjoint_samples 12100 joint_samples 35 [1022022, 120969]
+processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1047206, 101050]
+processed_samples 12100 unjoint_samples 12100 joint_samples 36 [1046929, 181826]
+processed_samples 12100 unjoint_samples 12100 joint_samples 37 [222540, 1039502]
+processed_samples 12100 unjoint_samples 12100 joint_samples 35 [1040114, 561593]
+processed_samples 12100 unjoint_samples 12100 joint_samples 35 [1046067, 680809]
+[h264 @ ...] mmco: unref short failure   (repeated)
+processed_samples 12200 unjoint_samples 12200 joint_samples 36 [157704, 1013451]
+processed_samples 12200 unjoint_samples 12200 joint_samples 35 [1022022, 434366]
+processed_samples 12200 unjoint_samples 12200 joint_samples 37 [1046735, 422115]
+processed_samples 12200 unjoint_samples 12200 joint_samples 35 [1040114, 842727]
+processed_samples 12200 unjoint_samples 12200 joint_samples 36 [386471, 1046897]
+processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1046929, 513256]
+processed_samples 12200 unjoint_samples 12200 joint_samples 36 [1047206, 432975]
+processed_samples 12200 unjoint_samples 12200 joint_samples 37 [604470, 1039502]
+[h264 @ ...] mmco: unref short failure   (repeated)
+processed_samples 12300 unjoint_samples 12300 joint_samples 37 [1046735, 692161]
+processed_samples 12300 unjoint_samples 12300 joint_samples 36 [547298, 1013451]
+processed_samples 12300 unjoint_samples 12300 joint_samples 36 [18594, 1048063]
+processed_samples 12300 unjoint_samples 12300 joint_samples 35 [1022022, 700152]
+processed_samples 12300 unjoint_samples 12300 joint_samples 36 [1047206, 753030]
+processed_samples 12300 unjoint_samples 12300 joint_samples 36 [727626, 1046897]
+processed_samples 12300 unjoint_samples 12300 joint_samples 37 [924503, 1039502]
+processed_samples 12300 unjoint_samples 12300 joint_samples 36 [1046929, 751678]
+[h264 @ ...] mmco: unref short failure   (repeated)
+processed_samples 12400 unjoint_samples 12400 joint_samples 38 [119136, 1006254]
+processed_samples 12400 unjoint_samples 12400 joint_samples 35 [1022022, 935784]
+processed_samples 12400 unjoint_samples 12400 joint_samples 37 [72817, 1027493]
+processed_samples 12400 unjoint_samples 12400 joint_samples 37 [117255, 1046299]
+processed_samples 12400 unjoint_samples 12400 joint_samples 38 [109976, 1048289]
+processed_samples 12400 unjoint_samples 12400 joint_samples 36 [325682, 1048063]
+processed_samples 12400 unjoint_samples 12400 joint_samples 36 [816373, 1013451]
+processed_samples 12400 unjoint_samples 12400 joint_samples 36 [995332, 1046897]
+[h264 @ ...] mmco: unref short failure   (repeated)
+processed_samples 12500 unjoint_samples 12500 joint_samples 37 [126089, 1025754]
+processed_samples 12500 unjoint_samples 12500 joint_samples 36 [1047918, 99029]
+processed_samples 12500 unjoint_samples 12500 joint_samples 37 [220656, 1046897]
+processed_samples 12500 unjoint_samples 12500 joint_samples 37 [399785, 1027493]
+processed_samples 12500 unjoint_samples 12500 joint_samples 38 [508470, 1006254]
+processed_samples 12500 unjoint_samples 12500 joint_samples 37 [414473, 1046299]
+processed_samples 12500 unjoint_samples 12500 joint_samples 38 [492426, 1048289]
+processed_samples 12500 unjoint_samples 12500 joint_samples 36 [584341, 1048063]
+[h264 @ ...] mmco: unref short failure   (repeated)
+processed_samples 12600 unjoint_samples 12600 joint_samples 36 [910138, 1048063]
+processed_samples 12600 unjoint_samples 12600 joint_samples 37 [803659, 1027493]
+processed_samples 12600 unjoint_samples 12600 joint_samples 36 [1047918, 470478]
+processed_samples 12600 unjoint_samples 12600 joint_samples 37 [363285, 1025754]
+processed_samples 12600 unjoint_samples 12600 joint_samples 38 [843752, 1006254]
+processed_samples 12600 unjoint_samples 12600 joint_samples 38 [821737, 1048289]
+processed_samples 12600 unjoint_samples 12600 joint_samples 37 [673423, 1046897]
+processed_samples 12600 unjoint_samples 12600 joint_samples 37 [660857, 1046299]
+[h264 @ ...] mmco: unref short failure   (repeated)
+processed_samples 12700 unjoint_samples 12700 joint_samples 38 [111592, 1032128]
+processed_samples 12700 unjoint_samples 12700 joint_samples 37 [1043780, 152568]
+processed_samples 12700 unjoint_samples 12700 joint_samples 39 [1039168, 70829]
+processed_samples 12700 unjoint_samples 12700 joint_samples 39 [1046612, 28619]
+processed_samples 12700 unjoint_samples 12700 joint_samples 37 [622661, 1025754]
+processed_samples 12700 unjoint_samples 12700 joint_samples 36 [1047918, 792835]
+processed_samples 12700 unjoint_samples 12700 joint_samples 37 [1045022, 1046897]
+processed_samples 12700 unjoint_samples 12700 joint_samples 37 [996541, 1046299]
+[h264 @ ...] mmco: unref short failure   (repeated)
+processed_samples 12800 unjoint_samples 12800 joint_samples 37 [45983, 1046710]
+processed_samples 12800 unjoint_samples 12800 joint_samples 39 [1046612, 491297]
+processed_samples 12800 unjoint_samples 12800 joint_samples 38 [330658, 1046897]
+processed_samples 12800 unjoint_samples 12800 joint_samples 37 [1043780, 403570]
+processed_samples 12800 unjoint_samples 12800 joint_samples 38 [1023679, 272124]
+processed_samples 12800 unjoint_samples 12800 joint_samples 39 [1039168, 274892]
+processed_samples 12800 unjoint_samples 12800 joint_samples 37 [982428, 1025754]
+processed_samples 12800 unjoint_samples 12800 joint_samples 38 [417180, 1032128]
+[h264 @ ...] mmco: unref short failure   (repeated)
+processed_samples 12900 unjoint_samples 12900 joint_samples 39 [1046612, 796945]
+processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1038025, 199614]
+processed_samples 12900 unjoint_samples 12900 joint_samples 38 [754466, 1046897]
+processed_samples 12900 unjoint_samples 12900 joint_samples 37 [1043780, 622587]
+processed_samples 12900 unjoint_samples 12900 joint_samples 38 [878538, 1032128]
+processed_samples 12900 unjoint_samples 12900 joint_samples 39 [1039168, 503374]
+processed_samples 12900 unjoint_samples 12900 joint_samples 37 [322188, 1046710]
+processed_samples 12900 unjoint_samples 12900 joint_samples 38 [1023679, 616883]
0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +processed_samples 13000 unjoint_samples 13000 joint_samples 40 [62977, 1030681] +processed_samples 13000 unjoint_samples 13000 joint_samples 40 [62977, 1030681] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1037701, 323150] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1037701, 323150] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [920267, 273930] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [920267, 273930] +processed_samples 13000 unjoint_samples 13000 joint_samples 38 [1038025, 473011] +processed_samples 13000 unjoint_samples 13000 joint_samples 38 [1038025, 473011] +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +processed_samples 13000 unjoint_samples 13000 joint_samples 37 [669484, 1046710] +processed_samples 13000 unjoint_samples 13000 joint_samples 37 [669484, 1046710] +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +processed_samples 13000 unjoint_samples 13000 joint_samples 37 [1043780, 899606] +processed_samples 13000 unjoint_samples 13000 joint_samples 37 [1043780, 899606] +processed_samples 13000 unjoint_samples 13000 joint_samples 38 [1023679, 901268] +processed_samples 13000 unjoint_samples 13000 joint_samples 38 [1023679, 901268] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1039168, 904311] +processed_samples 13000 unjoint_samples 13000 joint_samples 39 [1039168, 904311] +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x56387d4ad200] mmco: unref short failure +[h264 @ 0x56387d4ad200] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 
0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed74aec40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed74aec40] mmco: unref short failure +[h264 @ 0x55eed74aec40] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x5638751ede00] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x5638751ede00] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x56387530bc40] mmco: unref short failure +[h264 @ 0x56387530bc40] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed43712c0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +processed_samples 13100 unjoint_samples 13100 joint_samples 40 [419114, 1030681] +processed_samples 13100 unjoint_samples 13100 joint_samples 40 [419114, 1030681] +processed_samples 13100 unjoint_samples 13100 joint_samples 38 [1038025, 742625] +processed_samples 13100 unjoint_samples 13100 joint_samples 38 [144656, 1043439] +processed_samples 13100 unjoint_samples 13100 joint_samples 38 [144656, 1043439] +processed_samples 13100 unjoint_samples 13100 joint_samples 38 [1038025, 742625] +processed_samples 13100 unjoint_samples 13100 joint_samples 40 [1044209, 316341] +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +processed_samples 13100 unjoint_samples 13100 joint_samples 40 [1044209, 316341] +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [142976, 1047624] +processed_samples 
13100 unjoint_samples 13100 joint_samples 39 [142976, 1047624] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1037701, 645937] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [1037701, 645937] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [920267, 572766] +processed_samples 13100 unjoint_samples 13100 joint_samples 39 [920267, 572766] +processed_samples 13100 unjoint_samples 13100 joint_samples 37 [968283, 1046710] +processed_samples 13100 unjoint_samples 13100 joint_samples 37 [968283, 1046710] +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed74aec40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x5638757f3980] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short 
failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +processed_samples 13200 unjoint_samples 13200 joint_samples 40 [718624, 1030681] +processed_samples 13200 unjoint_samples 13200 joint_samples 38 [230191, 1046710] +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [396987, 1047624] +processed_samples 13200 unjoint_samples 13200 joint_samples 40 [1044209, 684848] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1037701, 854320] +processed_samples 13200 unjoint_samples 13200 joint_samples 38 [489446, 1043439] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [920267, 863242] +processed_samples 13200 unjoint_samples 13200 joint_samples 38 [1038025, 1009578] +processed_samples 13200 unjoint_samples 13200 joint_samples 40 [718624, 1030681] +processed_samples 13200 unjoint_samples 13200 joint_samples 38 [230191, 1046710] +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [396987, 1047624] +processed_samples 13200 unjoint_samples 13200 joint_samples 40 [1044209, 684848] +processed_samples 13200 unjoint_samples 13200 joint_samples 38 [489446, 1043439] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [1037701, 854320] +processed_samples 13200 unjoint_samples 13200 joint_samples 39 [920267, 863242] +processed_samples 13200 unjoint_samples 13200 joint_samples 38 [1038025, 1009578] +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875069800] mmco: unref short failure +[h264 @ 0x563875069800] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387477ee00] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed3490980] [h264 @ 0x563874c67c00] mmco: unref short failure +mmco: unref short failure +[h264 @ 0x563874c67c00] [h264 @ 0x55eed3490980] mmco: unref short failure +mmco: unref short failure +[h264 @ 0x55eed74aec40] mmco: unref short failure +[h264 @ 0x55eed74aec40] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref 
short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x5638751ede00] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x5638751ede00] mmco: unref short failure +[h264 @ 0x5638751ede00] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x55eed403c880] mmco: unref short failure +[h264 @ 0x55eed403c880] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 
0x5638786646c0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +processed_samples 13300 unjoint_samples 13300 joint_samples 39 [1044631, 280581] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1044209, 1034287] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [123690, 1029741] +processed_samples 13300 unjoint_samples 13300 joint_samples 38 [510361, 1046710] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [47827, 1046156] +processed_samples 13300 unjoint_samples 13300 joint_samples 39 [1044631, 280581] +processed_samples 13300 unjoint_samples 13300 joint_samples 38 [510361, 1046710] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1044209, 1034287] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [123690, 1029741] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [47827, 1046156] +processed_samples 13300 unjoint_samples 13300 joint_samples 38 [763130, 1043439] +processed_samples 13300 unjoint_samples 13300 joint_samples 38 [763130, 1043439] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1027811, 1030681] +processed_samples 13300 unjoint_samples 13300 joint_samples 40 [1027811, 1030681] +processed_samples 13300 unjoint_samples 13300 joint_samples 39 [692242, 1047624] +[h264 @ 0x563875706fc0] mmco: unref short failure +processed_samples 13300 unjoint_samples 13300 joint_samples 39 [692242, 1047624] +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed3490980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x56387477ee00] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed403c880] mmco: unref short failure +[h264 @ 0x55eed403c880] mmco: unref short failure 
+[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x5638751ede00] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x55eed3bc9740] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +processed_samples 13400 unjoint_samples 13400 joint_samples 41 [1030683, 364591] +processed_samples 13400 unjoint_samples 13400 joint_samples 41 [1030683, 364591] +processed_samples 13400 unjoint_samples 13400 joint_samples 39 [1044783, 20050] +processed_samples 13400 unjoint_samples 13400 joint_samples 39 [1044783, 20050] +processed_samples 13400 unjoint_samples 13400 joint_samples 41 [1046496, 357566] +processed_samples 13400 unjoint_samples 13400 joint_samples 39 [1044631, 537174] +processed_samples 13400 unjoint_samples 13400 joint_samples 39 [1044631, 537174] +processed_samples 13400 unjoint_samples 13400 joint_samples 41 [1046496, 357566] +processed_samples 13400 unjoint_samples 13400 joint_samples 38 [853762, 1046710] +processed_samples 13400 unjoint_samples 13400 joint_samples 39 [1006579, 1047624] +processed_samples 13400 unjoint_samples 13400 joint_samples 38 [853762, 1046710] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [328939, 1046156] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [328939, 1046156] +processed_samples 13400 unjoint_samples 13400 joint_samples 39 [1006579, 1047624] +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [351865, 1029741] +processed_samples 13400 unjoint_samples 13400 joint_samples 40 [351865, 
1029741] +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563878d0bec0] mmco: unref short failure +[h264 @ 0x563878d0bec0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed77a6380] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x563878b24c80] 
mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x55eed77a6380] mmco: unref short failure +[h264 @ 0x55eed77a6380] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563878ceacc0] mmco: unref short failure +[h264 @ 0x563878ceacc0] mmco: unref short failure +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [727285, 1046156] +processed_samples 13500 unjoint_samples 13500 joint_samples 39 [119668, 1046710] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [630512, 1029741] +processed_samples 13500 unjoint_samples 13500 joint_samples 39 [1044783, 362982] +processed_samples 13500 unjoint_samples 13500 joint_samples 41 [1030683, 622786] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1046777, 245434] +processed_samples 13500 unjoint_samples 13500 joint_samples 39 [1044631, 801184] +processed_samples 13500 unjoint_samples 13500 joint_samples 39 [119668, 1046710] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [727285, 1046156] +processed_samples 13500 unjoint_samples 13500 joint_samples 41 [1030683, 622786] +[h264 @ 0x55eed310b440] mmco: unref short failure +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [1046777, 245434] +processed_samples 13500 unjoint_samples 13500 joint_samples 40 [630512, 1029741] +processed_samples 13500 unjoint_samples 13500 joint_samples 39 [1044783, 362982] +processed_samples 13500 unjoint_samples 13500 joint_samples 41 [1046496, 867513] +[h264 @ 0x55eed4e5f300] mmco: unref short failure +processed_samples 13500 unjoint_samples 13500 joint_samples 41 [1046496, 867513] +processed_samples 13500 unjoint_samples 13500 joint_samples 39 [1044631, 801184] +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x563876026bc0] mmco: unref short failure +[h264 @ 0x563876026bc0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 
0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x563878d0bec0] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x563876026bc0] mmco: unref short failure +[h264 @ 0x563876026bc0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x5638747c4140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eedbcf1c40] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short 
failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [119846, 1025104] +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [119846, 1025104] +processed_samples 13600 unjoint_samples 13600 joint_samples 39 [1044783, 653659] +processed_samples 13600 unjoint_samples 13600 joint_samples 39 [1044783, 653659] +processed_samples 13600 unjoint_samples 13600 joint_samples 41 [1030683, 876750] +processed_samples 13600 unjoint_samples 13600 joint_samples 39 [392661, 1046710] +processed_samples 13600 unjoint_samples 13600 joint_samples 39 [392661, 1046710] +processed_samples 13600 unjoint_samples 13600 joint_samples 41 [1030683, 876750] +processed_samples 13600 unjoint_samples 13600 joint_samples 42 [131149, 1013926] +processed_samples 13600 unjoint_samples 13600 joint_samples 42 [131149, 1013926] +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [1046777, 465547] +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [1046777, 465547] +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [942251, 1029741] +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [942251, 1029741] +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [970986, 1046156] +[h264 @ 0x55eed49eb540] mmco: unref short failure +processed_samples 13600 unjoint_samples 13600 joint_samples 40 [970986, 1046156] +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563874798580] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref 
short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x5638753c58c0] mmco: unref short failure +[h264 @ 0x5638753c58c0] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 
0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eeda0a8a00] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +processed_samples 13700 unjoint_samples 13700 joint_samples 42 [1044943, 146407] +processed_samples 13700 unjoint_samples 13700 joint_samples 42 [1044943, 146407] +processed_samples 13700 unjoint_samples 13700 joint_samples 40 [483347, 1025104] +processed_samples 13700 unjoint_samples 13700 joint_samples 40 [483347, 1025104] +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [1046703, 226385] +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [1046703, 226385] +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [1046379, 243344] +processed_samples 13700 unjoint_samples 13700 joint_samples 41 [1046379, 243344] +processed_samples 13700 unjoint_samples 13700 joint_samples 39 [670471, 1046710] +processed_samples 13700 unjoint_samples 13700 joint_samples 39 [670471, 1046710] +processed_samples 13700 unjoint_samples 13700 joint_samples 42 [532134, 1013926] +processed_samples 13700 unjoint_samples 13700 joint_samples 40 [1046777, 802520] +processed_samples 13700 unjoint_samples 13700 joint_samples 42 [532134, 1013926] +processed_samples 13700 unjoint_samples 13700 joint_samples 40 [1046777, 802520] +processed_samples 13700 unjoint_samples 13700 joint_samples 39 [1044783, 1021377] +processed_samples 13700 unjoint_samples 13700 joint_samples 39 [1044783, 1021377] +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed403c880] mmco: unref short failure +[h264 @ 0x55eed403c880] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure 
+[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed403c880] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eeda7ac640] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +processed_samples 13800 unjoint_samples 13800 joint_samples 40 [803072, 1025104] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [1046379, 550229] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [178455, 1034679] +processed_samples 13800 unjoint_samples 13800 joint_samples 40 [803072, 1025104] +processed_samples 13800 unjoint_samples 13800 joint_samples 40 [206375, 1046523] +processed_samples 13800 unjoint_samples 13800 joint_samples 40 [206375, 1046523] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [178455, 1034679] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [1046703, 576497] +processed_samples 13800 unjoint_samples 13800 joint_samples 42 [903300, 1013926] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [1046379, 550229] +processed_samples 13800 unjoint_samples 13800 joint_samples 42 [1044943, 736496] +processed_samples 13800 unjoint_samples 13800 joint_samples 41 [1046703, 576497] +processed_samples 13800 unjoint_samples 13800 joint_samples 42 [1044943, 736496] +processed_samples 13800 unjoint_samples 13800 joint_samples 39 [1034688, 1046710] +processed_samples 13800 unjoint_samples 13800 
joint_samples 42 [903300, 1013926] +processed_samples 13800 unjoint_samples 13800 joint_samples 39 [1034688, 1046710] +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x55eed4d30700] mmco: unref short failure +[h264 @ 0x56387530bc40] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x55eed82541c0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563876040940] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 
0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +processed_samples 13900 unjoint_samples 13900 joint_samples 43 [1047520, 42949] +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +processed_samples 13900 unjoint_samples 13900 joint_samples 43 [1047520, 42949] +processed_samples 13900 unjoint_samples 13900 joint_samples 43 [162913, 1028109] +processed_samples 13900 unjoint_samples 13900 joint_samples 43 [162913, 1028109] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [415303, 1034679] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [415303, 1034679] +processed_samples 13900 unjoint_samples 13900 joint_samples 40 [285992, 1046710] +processed_samples 13900 unjoint_samples 13900 joint_samples 40 [285992, 1046710] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1046703, 813648] +processed_samples 13900 unjoint_samples 13900 joint_samples 40 [603948, 1046523] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1046379, 855802] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1046379, 855802] +processed_samples 13900 unjoint_samples 13900 joint_samples 41 [1046703, 813648] +processed_samples 13900 unjoint_samples 13900 joint_samples 40 [603948, 1046523] +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x563875f3df00] [h264 @ 0x55eed5bee740] mmco: unref short failure +mmco: unref short failure +processed_samples 13900 unjoint_samples 13900 joint_samples 40 [1035812, 1035122] +processed_samples 13900 unjoint_samples 13900 joint_samples 40 [1035812, 1035122] +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x55eed6d92580] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 
@ 0x55eed5a8f800] mmco: unref short failure
+processed_samples 14000 unjoint_samples 14000 joint_samples 41 [445712, 1037357]
+processed_samples 14000 unjoint_samples 14000 joint_samples 40 [563011, 1046710]
+processed_samples 14000 unjoint_samples 14000 joint_samples 43 [1047520, 349966]
+processed_samples 14000 unjoint_samples 14000 joint_samples 42 [61380, 1045060]
+processed_samples 14000 unjoint_samples 14000 joint_samples 43 [514938, 1028109]
+processed_samples 14000 unjoint_samples 14000 joint_samples 41 [752456, 1034679]
+processed_samples 14000 unjoint_samples 14000 joint_samples 40 [1040457, 1046523]
+processed_samples 14000 unjoint_samples 14000 joint_samples 42 [2421, 1046718]
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [282125, 1046718]
+processed_samples 14100 unjoint_samples 14100 joint_samples 41 [1043709, 310036]
+processed_samples 14100 unjoint_samples 14100 joint_samples 43 [847905, 1028109]
+processed_samples 14100 unjoint_samples 14100 joint_samples 42 [352012, 1045060]
+processed_samples 14100 unjoint_samples 14100 joint_samples 43 [1047520, 594037]
+processed_samples 14100 unjoint_samples 14100 joint_samples 41 [734064, 1037357]
+processed_samples 14100 unjoint_samples 14100 joint_samples 40 [854654, 1046710]
+processed_samples 14100 unjoint_samples 14100 joint_samples 41 [990945, 1034679]
+processed_samples 14200 unjoint_samples 14200 joint_samples 42 [554268, 1046718]
+processed_samples 14200 unjoint_samples 14200 joint_samples 41 [75017, 1046760]
+processed_samples 14200 unjoint_samples 14200 joint_samples 42 [257191, 1046890]
+processed_samples 14200 unjoint_samples 14200 joint_samples 44 [980309, 381674]
+processed_samples 14200 unjoint_samples 14200 joint_samples 41 [1043709, 594321]
+processed_samples 14200 unjoint_samples 14200 joint_samples 42 [639066, 1045060]
+processed_samples 14200 unjoint_samples 14200 joint_samples 43 [1047520, 918502]
+processed_samples 14200 unjoint_samples 14200 joint_samples 42 [1046309, 3273]
+processed_samples 14300 unjoint_samples 14300 joint_samples 44 [192995, 1047963]
+processed_samples 14300 unjoint_samples 14300 joint_samples 42 [1046309, 504533]
+processed_samples 14300 unjoint_samples 14300 joint_samples 41 [411063, 1046760]
+processed_samples 14300 unjoint_samples 14300 joint_samples 44 [980309, 671123]
+processed_samples 14300 unjoint_samples 14300 joint_samples 42 [720373, 1046890]
+processed_samples 14300 unjoint_samples 14300 joint_samples 41 [1043709, 857320]
+processed_samples 14300 unjoint_samples 14300 joint_samples 42 [1003939, 1045060]
+processed_samples 14300 unjoint_samples 14300 joint_samples 42 [775990, 1046718]
+processed_samples 14400 unjoint_samples 14400 joint_samples 42 [1046465, 58819]
+processed_samples 14400 unjoint_samples 14400 joint_samples 43 [1045302, 56587]
+processed_samples 14400 unjoint_samples 14400 joint_samples 43 [1047478, 264108]
+processed_samples 14400 unjoint_samples 14400 joint_samples 42 [1046309, 801598]
+processed_samples 14400 unjoint_samples 14400 joint_samples 41 [753604, 1046760]
+processed_samples 14400 unjoint_samples 14400 joint_samples 44 [580146, 1047963]
+processed_samples 14400 unjoint_samples 14400 joint_samples 44 [980309, 941767]
+processed_samples 14400 unjoint_samples 14400 joint_samples 42 [972720, 1046890]
+processed_samples 14500 unjoint_samples 14500 joint_samples 45 [401389, 983562]
+processed_samples 14500 unjoint_samples 14500 joint_samples 43 [1034501, 191097]
+processed_samples 14500 unjoint_samples 14500 joint_samples 42 [1046465, 421629]
+processed_samples 14500 unjoint_samples 14500 joint_samples 43 [1045302, 389816]
+processed_samples 14500 unjoint_samples 14500 joint_samples 42 [1047635, 1048027]
+processed_samples 14500 unjoint_samples 14500 joint_samples 43 [1047478, 562500]
+processed_samples 14500 unjoint_samples 14500 joint_samples 41 [1008809, 1046760]
+processed_samples 14500 unjoint_samples 14500 joint_samples 44 [909898, 1047963]
+processed_samples 14600 unjoint_samples 14600 joint_samples 42 [1046584, 315938]
+processed_samples 14600 unjoint_samples 14600 joint_samples 45 [1028921, 151287]
+processed_samples 14600 unjoint_samples 14600 joint_samples 43 [1047635, 477936]
+processed_samples 14600 unjoint_samples 14600 joint_samples 43 [1045302, 674745]
+processed_samples 14600 unjoint_samples 14600 joint_samples 45 [671643, 983562]
+processed_samples 14600 unjoint_samples 14600 joint_samples 43 [1034501, 635570]
+processed_samples 14600 unjoint_samples 14600 joint_samples 43 [1047478, 870592]
+processed_samples 14600 unjoint_samples 14600 joint_samples 42 [1046465, 719440]
+processed_samples 14700 unjoint_samples 14700 joint_samples 46 [31999, 1025984]
+processed_samples 14700 unjoint_samples 14700 joint_samples 43 [1046465, 80768]
+processed_samples 14700 unjoint_samples 14700 joint_samples 45 [1028921, 428483]
+processed_samples 14700 unjoint_samples 14700 joint_samples 42 [1046584, 669358]
+processed_samples 14700 unjoint_samples 14700 joint_samples 44 [128322, 1046740]
+processed_samples 14700 unjoint_samples 14700 joint_samples 43 [1047635, 735454]
+processed_samples 14700 unjoint_samples 14700 joint_samples 44 [1045302, 17305]
+processed_samples 14700 unjoint_samples 14700 joint_samples 43 [1034501, 947819]
+processed_samples 14800 unjoint_samples 14800 joint_samples 46 [309767, 1025984]
+processed_samples 14800 unjoint_samples 14800 joint_samples 45 [1028921, 715820]
+processed_samples 14800 unjoint_samples 14800 joint_samples 43 [1047635, 988213]
+processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1037901, 238453]
+processed_samples 14800 unjoint_samples 14800 joint_samples 44 [1045302, 336231]
+processed_samples 14800 unjoint_samples 14800 joint_samples 44 [446467, 1046740]
+processed_samples 14800 unjoint_samples 14800 joint_samples 42 [1046584, 998358]
+processed_samples 14800 unjoint_samples 14800 joint_samples 43 [1046465, 427928]
+processed_samples 14900 unjoint_samples 14900 joint_samples 46 [133914, 1020237]
+processed_samples 14900 unjoint_samples 14900 joint_samples 44 [345042, 1028490]
+processed_samples 14900 unjoint_samples 14900 joint_samples 43 [1046465, 836215]
+processed_samples 14900 unjoint_samples 14900 joint_samples 46 [590695, 1025984]
+processed_samples 14900 unjoint_samples 14900 joint_samples 44 [766695, 1046740]
+processed_samples 14900 unjoint_samples 14900 joint_samples 44 [1037901, 534959]
+processed_samples 14900 unjoint_samples 14900 joint_samples 43 [301004, 1045545]
+processed_samples 14900 unjoint_samples 14900 joint_samples 44 [1045302, 642685]
+processed_samples 15000 unjoint_samples 15000 joint_samples 44 [634665, 1028490]
+processed_samples 15000 unjoint_samples 15000 joint_samples 46 [517800, 1020237]
+processed_samples 15000 unjoint_samples 15000 joint_samples 44 [1045302, 845191]
+processed_samples 15000 unjoint_samples 15000 joint_samples 43 [624082, 1045545]
+processed_samples 15000 unjoint_samples 15000 joint_samples 46 [818871, 1025984]
+processed_samples 15000 unjoint_samples 15000 joint_samples 44 [1046465, 23703]
+processed_samples 15000 unjoint_samples 15000 joint_samples 44 [1042561, 1046740]
+processed_samples 15000 unjoint_samples 15000 joint_samples 44 [1037901, 885047]
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
+processed_samples 15100 unjoint_samples 15100 joint_samples 46 [858597, 1020237]
+processed_samples 15100 unjoint_samples 15100 joint_samples 45 [1047137, 384825]
+processed_samples 15100 unjoint_samples 15100 joint_samples 43 [935821, 1045545]
+processed_samples 15100 unjoint_samples 15100 joint_samples 45 [123850, 1035609]
+processed_samples 15100 unjoint_samples 15100 joint_samples 45 [130393, 1008628]
+processed_samples 15100 unjoint_samples 15100 joint_samples 44 [1046465, 350698]
+processed_samples 15100 unjoint_samples 15100 joint_samples 44 [920195, 1028490]
+processed_samples 15100 unjoint_samples 15100 joint_samples 47 [115726, 1048321]
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
+processed_samples 15200 unjoint_samples 15200 joint_samples 45 [215899, 1042703]
+processed_samples 15200 unjoint_samples 15200 joint_samples 47 [1033089, 189164]
+processed_samples 15200 unjoint_samples 15200 joint_samples 44 [148769, 1045545]
+processed_samples 15200 unjoint_samples 15200 joint_samples 47 [358741, 1048321]
+processed_samples 15200 unjoint_samples 15200 joint_samples 45 [526695, 1008628]
+processed_samples 15200 unjoint_samples 15200 joint_samples 45 [1047137, 705044]
+processed_samples 15200 unjoint_samples 15200 joint_samples 45 [339058, 1035609]
+processed_samples 15200 unjoint_samples 15200 joint_samples 44 [1046465, 687341]
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
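The "[h264 @ 0x...] mmco: unref short failure" lines come from FFmpeg's H.264 decoder: a memory-management control operation in the bitstream asks to unreference a short-term reference frame the decoder does not hold, which typically happens with truncated or corrupted clips or when decoding starts away from a keyframe. Decoding generally continues, so for training these are noise rather than errors. If the video dataloader decodes through PyAV (an assumption; the log does not show which FFmpeg binding is in use), the chatter could be reduced by raising FFmpeg's log level, roughly as sketched below; the severity of this particular message varies between FFmpeg builds, so a stricter level may be needed.

# Sketch under the assumption that PyAV is the FFmpeg binding used for video decoding.
import av.logging

def quiet_ffmpeg_decoder_logs():
    # Route only ERROR-and-above libav* messages to stderr; lower-severity decoder
    # chatter is dropped. Depending on the FFmpeg build, "mmco: unref short failure"
    # may still require an even stricter level.
    av.logging.set_level(av.logging.ERROR)

quiet_ffmpeg_decoder_logs()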
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
+processed_samples 15300 unjoint_samples 15300 joint_samples 47 [1033089, 468626]
+processed_samples 15300 unjoint_samples 15300 joint_samples 45 [519561, 1042703]
+processed_samples 15300 unjoint_samples 15300 joint_samples 44 [576814, 1045545]
+processed_samples 15300 unjoint_samples 15300 joint_samples 44 [1046465, 945435]
+processed_samples 15300 unjoint_samples 15300 joint_samples 45 [1047137, 986878]
+processed_samples 15300 unjoint_samples 15300 joint_samples 45 [872616, 1008628]
+processed_samples 15300 unjoint_samples 15300 joint_samples 45 [599237, 1035609]
+processed_samples 15300 unjoint_samples 15300 joint_samples 47 [605926, 1048321]
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
+processed_samples 15400 unjoint_samples 15400 joint_samples 45 [135165, 1046674]
+processed_samples 15400 unjoint_samples 15400 joint_samples 46 [172181, 1014589]
+processed_samples 15400 unjoint_samples 15400 joint_samples 46 [239440, 1044095]
+processed_samples 15400 unjoint_samples 15400 joint_samples 45 [852170, 1042703]
+processed_samples 15400 unjoint_samples 15400 joint_samples 44 [919477, 1045545]
+processed_samples 15400 unjoint_samples 15400 joint_samples 47 [1033089, 750921]
+processed_samples 15400 unjoint_samples 15400 joint_samples 47 [996368, 1048321]
+processed_samples 15400 unjoint_samples 15400 joint_samples 45 [1036741, 1035844]
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
+processed_samples 15500 unjoint_samples 15500 joint_samples 45 [1044130, 244596]
+processed_samples 15500 unjoint_samples 15500 joint_samples 46 [1034347, 270057]
+processed_samples 15500 unjoint_samples 15500 joint_samples 45 [562173, 1046674]
+processed_samples 15500 unjoint_samples 15500 joint_samples 46 [272707, 1046445]
+processed_samples 15500 unjoint_samples 15500 joint_samples 48 [232815, 1048321]
+processed_samples 15500 unjoint_samples 15500 joint_samples 47 [1047804, 1045977]
+processed_samples 15500 unjoint_samples 15500 joint_samples 46 [496927, 1014589]
+processed_samples 15500 unjoint_samples 15500 joint_samples 46 [572567, 1044095]
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
+processed_samples 15600 unjoint_samples 15600 joint_samples 45 [1044130, 589211]
+processed_samples 15600 unjoint_samples 15600 joint_samples 46 [524926, 1046445]
+processed_samples 15600 unjoint_samples 15600 joint_samples 46 [1034347, 721630]
+processed_samples 15600 unjoint_samples 15600 joint_samples 48 [1047804, 266913]
+processed_samples 15600 unjoint_samples 15600 joint_samples 48 [619672, 1048321]
+processed_samples 15600 unjoint_samples 15600 joint_samples 46 [907286, 1044095]
+processed_samples 15600 unjoint_samples 15600 joint_samples 46 [797458, 1014589]
+processed_samples 15600 unjoint_samples 15600 joint_samples 45 [857174, 1046674]
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
+processed_samples 15700 unjoint_samples 15700 joint_samples 47 [1046742, 147616]
+processed_samples 15700 unjoint_samples 15700 joint_samples 47 [1045987, 142273]
+processed_samples 15700 unjoint_samples 15700 joint_samples 46 [164871, 1046674]
+processed_samples 15700 unjoint_samples 15700 joint_samples 48 [1047804, 687534]
+processed_samples 15700 unjoint_samples 15700 joint_samples 45 [1044130, 946456]
+processed_samples 15700 unjoint_samples 15700 joint_samples 47 [23984, 1037958]
+processed_samples 15700 unjoint_samples 15700 joint_samples 48 [901500, 1048321]
+processed_samples 15700 unjoint_samples 15700 joint_samples 46 [907555, 1046445]
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
+processed_samples 15800 unjoint_samples 15800 joint_samples 49 [25517, 1046987]
+processed_samples 15800 unjoint_samples 15800 joint_samples 46 [310007, 976494]
+processed_samples 15800 unjoint_samples 15800 joint_samples 47 [362867, 1037958]
+processed_samples 15800 unjoint_samples 15800 joint_samples 46 [536583, 1046674]
+processed_samples 15800 unjoint_samples 15800 joint_samples 47 [1045987, 562735]
+processed_samples 15800 unjoint_samples 15800 joint_samples 49 [1042001, 236259]
+processed_samples 15800 unjoint_samples 15800 joint_samples 47 [199332, 1046445]
+processed_samples 15800 unjoint_samples 15800 joint_samples 47 [1046742, 466164]
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
+processed_samples 15900 unjoint_samples 15900 joint_samples 46 [605139, 976494]
+processed_samples 15900 unjoint_samples 15900 joint_samples 49 [551089, 1046987]
+processed_samples 15900 unjoint_samples 15900 joint_samples 46 [782887, 1046674]
+processed_samples 15900 unjoint_samples 15900 joint_samples 47 [1045987, 825973]
+processed_samples 15900 unjoint_samples 15900 joint_samples 49 [1042001, 506246]
+processed_samples 15900 unjoint_samples 15900 joint_samples 47 [496914, 1046445]
+processed_samples 15900 unjoint_samples 15900 joint_samples 47 [630417, 1037958]
+processed_samples 15900 unjoint_samples 15900 joint_samples 47 [1046742, 755487]
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
+processed_samples 16000 unjoint_samples 16000 joint_samples 49 [828504, 1046987]
+processed_samples 16000 unjoint_samples 16000 joint_samples 48 [20186, 1047037]
+processed_samples 16000 unjoint_samples 16000 joint_samples 47 [812767, 1046445]
+processed_samples 16000 unjoint_samples 16000 joint_samples 47 [1046476, 51958]
+processed_samples 16000 unjoint_samples 16000 joint_samples 48 [1047144, 60074]
+processed_samples 16000 unjoint_samples 16000 joint_samples 47 [917625, 1037958]
+processed_samples 16000 unjoint_samples 16000 joint_samples 49 [1042001, 923607]
+processed_samples 16000 unjoint_samples 16000 joint_samples 46 [922332, 976494]
[h264 decoders: "mmco: unref short failure" repeated many times across multiple decoder instances]
+processed_samples 16100 unjoint_samples 16100 joint_samples 50 [1037289, 152628]
+processed_samples 16100 unjoint_samples 16100 joint_samples 47 [126313, 1040599]
+processed_samples 16100 unjoint_samples 16100 joint_samples 48 [425498, 1047037]
+processed_samples 16100 unjoint_samples 16100 joint_samples 47 [1046476, 415007]
+processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1044382, 147789]
+processed_samples 16100 unjoint_samples 16100 joint_samples 48 [69154, 1046445]
+processed_samples 16100 unjoint_samples 16100 joint_samples 50 [232323, 1048085]
+processed_samples 16100 unjoint_samples 16100 joint_samples 48 [1047144, 394050]
unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x56387477ee00] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3d8a9c0] mmco: unref short failure +[h264 @ 0x55eed3d8a9c0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3d8a9c0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +processed_samples 16200 unjoint_samples 16200 joint_samples 50 [1037289, 458586] +processed_samples 16200 unjoint_samples 16200 joint_samples 47 [447397, 1040599] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [382990, 1046445] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [781562, 1047037] +processed_samples 16200 unjoint_samples 16200 joint_samples 47 [1046476, 714893] +processed_samples 16200 unjoint_samples 16200 joint_samples 50 [1037289, 458586] +processed_samples 16200 unjoint_samples 16200 joint_samples 50 [525182, 1048085] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1044382, 501115] +processed_samples 16200 unjoint_samples 16200 joint_samples 47 [447397, 1040599] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [382990, 1046445] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [781562, 1047037] +processed_samples 16200 unjoint_samples 16200 joint_samples 47 [1046476, 714893] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1044382, 501115] +processed_samples 16200 unjoint_samples 16200 joint_samples 50 [525182, 1048085] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1047144, 786373] +processed_samples 16200 unjoint_samples 16200 joint_samples 48 [1047144, 786373] +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x56387996a9c0] 
mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x55eed3bb0e00] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed448ff80] mmco: unref short failure +[h264 @ 0x55eed448ff80] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x56387530bc40] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure 
+processed_samples 16300 unjoint_samples 16300 joint_samples 48 [1044382, 873282] +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [1044382, 873282] +processed_samples 16300 unjoint_samples 16300 joint_samples 49 [1040023, 74094] +processed_samples 16300 unjoint_samples 16300 joint_samples 49 [1040023, 74094] +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [138878, 1023099] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [138878, 1023099] +processed_samples 16300 unjoint_samples 16300 joint_samples 50 [1037289, 722287] +processed_samples 16300 unjoint_samples 16300 joint_samples 47 [801073, 1040599] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [1047144, 1037686] +processed_samples 16300 unjoint_samples 16300 joint_samples 50 [857285, 1048085] +processed_samples 16300 unjoint_samples 16300 joint_samples 50 [1037289, 722287] +processed_samples 16300 unjoint_samples 16300 joint_samples 47 [801073, 1040599] +processed_samples 16300 unjoint_samples 16300 joint_samples 50 [857285, 1048085] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [1047144, 1037686] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [938189, 1046445] +processed_samples 16300 unjoint_samples 16300 joint_samples 48 [938189, 1046445] +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed4c79700] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed4c79700] mmco: unref short failure +[h264 @ 0x55eed4c79700] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed3d8a9c0] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short 
failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +processed_samples 16400 unjoint_samples 16400 joint_samples 51 [9875, 1046924] +processed_samples 16400 unjoint_samples 16400 joint_samples 51 [9875, 1046924] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [162251, 1046445] +processed_samples 16400 unjoint_samples 16400 joint_samples 51 [1047580, 48642] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [162251, 1046445] +processed_samples 16400 unjoint_samples 16400 joint_samples 51 [1047580, 48642] 
+processed_samples 16400 unjoint_samples 16400 joint_samples 48 [555177, 1023099] +processed_samples 16400 unjoint_samples 16400 joint_samples 48 [555177, 1023099] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [189689, 1021308] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [189689, 1021308] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [306274, 1047459] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [306274, 1047459] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1040023, 466853] +processed_samples 16400 unjoint_samples 16400 joint_samples 49 [1040023, 466853] +processed_samples 16400 unjoint_samples 16400 joint_samples 47 [1036183, 1040599] +processed_samples 16400 unjoint_samples 16400 joint_samples 47 [1036183, 1040599] +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed4d30700] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed4d30700] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x55eed4750840] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] 
mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x56387483a1c0] mmco: unref short failure +[h264 @ 0x56387483a1c0] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x5638747c4140] mmco: unref short failure +[h264 @ 0x5638747c4140] mmco: unref short failure +[h264 @ 0x5638747c4140] mmco: unref short failure +[h264 @ 0x5638747c4140] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed3a68300] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +processed_samples 16500 unjoint_samples 16500 joint_samples 51 [429249, 1046924] +processed_samples 16500 unjoint_samples 16500 joint_samples 51 [1047580, 353031] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [617060, 1047459] +processed_samples 16500 unjoint_samples 16500 joint_samples 48 [371561, 1046737] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1040023, 775936] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [449814, 1046445] +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +processed_samples 16500 unjoint_samples 16500 joint_samples 51 [429249, 
1046924] +processed_samples 16500 unjoint_samples 16500 joint_samples 51 [1047580, 353031] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [617060, 1047459] +processed_samples 16500 unjoint_samples 16500 joint_samples 48 [371561, 1046737] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [449814, 1046445] +processed_samples 16500 unjoint_samples 16500 joint_samples 48 [798450, 1023099] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [532895, 1021308] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [532895, 1021308] +processed_samples 16500 unjoint_samples 16500 joint_samples 48 [798450, 1023099] +processed_samples 16500 unjoint_samples 16500 joint_samples 49 [1040023, 775936] +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure 
+[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed403c880] mmco: unref short failure +[h264 @ 0x55eed403c880] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +processed_samples 16600 unjoint_samples 16600 joint_samples 51 [705800, 1046924] +processed_samples 16600 unjoint_samples 16600 joint_samples 50 [39312, 1030609] +processed_samples 16600 unjoint_samples 16600 joint_samples 51 [705800, 1046924] +processed_samples 16600 unjoint_samples 16600 joint_samples 50 [39312, 1030609] +processed_samples 16600 unjoint_samples 16600 joint_samples 48 [729457, 1046737] +processed_samples 16600 unjoint_samples 16600 joint_samples 48 [729457, 1046737] +processed_samples 16600 unjoint_samples 16600 joint_samples 51 [1047580, 705188] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [703701, 1046445] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [845218, 1021308] +processed_samples 16600 unjoint_samples 16600 joint_samples 51 [1047580, 705188] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [703701, 1046445] +processed_samples 16600 unjoint_samples 16600 joint_samples 48 [1043265, 1044926] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [845218, 1021308] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [967365, 1047459] +processed_samples 16600 unjoint_samples 16600 joint_samples 49 [967365, 1047459] +processed_samples 16600 unjoint_samples 16600 joint_samples 48 [1043265, 1044926] +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short 
failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3d8a9c0] mmco: unref short failure +[h264 @ 0x55eed3d8a9c0] mmco: unref short failure +[h264 @ 0x55eed3d8a9c0] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x563875822040] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [360164, 1030609] +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [360164, 1030609] +processed_samples 16700 unjoint_samples 16700 joint_samples 52 [1045129, 4249] +processed_samples 16700 unjoint_samples 16700 joint_samples 52 [1045129, 4249] +processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1041970, 30837] +processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1041970, 30837] +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [1046585, 39382] +processed_samples 16700 unjoint_samples 16700 joint_samples 50 
[1046585, 39382] +processed_samples 16700 unjoint_samples 16700 joint_samples 51 [1047580, 982348] +processed_samples 16700 unjoint_samples 16700 joint_samples 51 [1047580, 982348] +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [1042437, 278254] +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [1042437, 278254] +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1045646, 230299] +processed_samples 16700 unjoint_samples 16700 joint_samples 49 [1045646, 230299] +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [1046966, 33409] +processed_samples 16700 unjoint_samples 16700 joint_samples 50 [1046966, 33409] +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3d8a9c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563876420f40] 
mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed3a68300] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +processed_samples 16800 unjoint_samples 16800 joint_samples 49 [1045646, 633702] +processed_samples 16800 unjoint_samples 16800 joint_samples 49 [1045646, 633702] +processed_samples 16800 unjoint_samples 16800 joint_samples 52 [191195, 1046437] +processed_samples 16800 unjoint_samples 16800 joint_samples 52 [191195, 1046437] +processed_samples 16800 unjoint_samples 16800 joint_samples 52 [1045129, 320344] +processed_samples 16800 unjoint_samples 16800 joint_samples 52 [1045129, 320344] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [708735, 1030609] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [708735, 1030609] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1046585, 333650] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1046585, 333650] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1042437, 523873] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1042437, 523873] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1046966, 400473] +processed_samples 16800 unjoint_samples 16800 joint_samples 50 [1046966, 400473] +processed_samples 16800 unjoint_samples 16800 joint_samples 49 [1041970, 416302] 
+processed_samples 16800 unjoint_samples 16800 joint_samples 49 [1041970, 416302] +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x56387b881a80] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x563874b7b400] mmco: unref short failure +[h264 @ 0x563874b7b400] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure 
+[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4c79700] mmco: unref short failure +[h264 @ 0x55eed4c79700] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1037034, 1036890] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1037034, 1036890] +processed_samples 16900 unjoint_samples 16900 joint_samples 52 [1045129, 655177] +processed_samples 16900 unjoint_samples 16900 joint_samples 52 [1045129, 655177] +processed_samples 16900 unjoint_samples 16900 joint_samples 49 [1041970, 703670] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1042437, 840244] +processed_samples 16900 unjoint_samples 16900 joint_samples 49 [1045646, 930923] +processed_samples 16900 unjoint_samples 16900 joint_samples 49 [1041970, 703670] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1042437, 840244] +processed_samples 16900 unjoint_samples 16900 joint_samples 49 [1045646, 930923] +processed_samples 16900 unjoint_samples 16900 joint_samples 52 [447967, 1046437] +processed_samples 16900 unjoint_samples 16900 joint_samples 52 [447967, 1046437] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1046585, 667755] +[h264 @ 0x5638786e5a40] mmco: unref short failure +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1046585, 667755] +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1046966, 742295] +processed_samples 16900 unjoint_samples 16900 joint_samples 50 [1046966, 742295] +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed4750840] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short 
failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3d8a9c0] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3d8a9c0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed30280c0] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x563875414600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +processed_samples 17000 unjoint_samples 17000 joint_samples 49 [1041970, 1030318] +processed_samples 17000 unjoint_samples 17000 joint_samples 50 [272758, 1046968] +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [1044718, 35799] +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [284629, 1045241] +processed_samples 17000 unjoint_samples 17000 joint_samples 52 [740843, 1046437] +processed_samples 17000 unjoint_samples 17000 joint_samples 52 [1045129, 917730] +processed_samples 17000 unjoint_samples 17000 joint_samples 50 [272758, 1046968] 
+processed_samples 17000 unjoint_samples 17000 joint_samples 49 [1041970, 1030318] +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [1044718, 35799] +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [284629, 1045241] +processed_samples 17000 unjoint_samples 17000 joint_samples 52 [740843, 1046437] +processed_samples 17000 unjoint_samples 17000 joint_samples 52 [1045129, 917730] +processed_samples 17000 unjoint_samples 17000 joint_samples 50 [1046585, 946241] +processed_samples 17000 unjoint_samples 17000 joint_samples 50 [1046585, 946241] +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [22471, 1040365] +processed_samples 17000 unjoint_samples 17000 joint_samples 51 [22471, 1040365] +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed33f0000] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 
0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed3bc9740] mmco: unref short failure +[h264 @ 0x55eed3bc9740] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +processed_samples 17100 unjoint_samples 17100 joint_samples 50 [1048202, 343766] +[h264 @ 0x55eed4750840] mmco: unref short failure +[h264 @ 0x55eed4750840] mmco: unref short failure +processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1046585, 169084] +processed_samples 17100 unjoint_samples 17100 joint_samples 53 [160492, 1031029] +processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1044718, 239756] +processed_samples 17100 unjoint_samples 17100 joint_samples 50 [608131, 1046968] +processed_samples 17100 unjoint_samples 17100 joint_samples 51 [564422, 1045241] +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +processed_samples 17100 unjoint_samples 17100 joint_samples 51 [329050, 1040365] +processed_samples 17100 unjoint_samples 17100 joint_samples 50 [1048202, 343766] 
+[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1046585, 169084] +processed_samples 17100 unjoint_samples 17100 joint_samples 53 [160492, 1031029] +processed_samples 17100 unjoint_samples 17100 joint_samples 51 [1044718, 239756] +processed_samples 17100 unjoint_samples 17100 joint_samples 50 [608131, 1046968] +processed_samples 17100 unjoint_samples 17100 joint_samples 51 [564422, 1045241] +processed_samples 17100 unjoint_samples 17100 joint_samples 51 [329050, 1040365] +processed_samples 17100 unjoint_samples 17100 joint_samples 52 [1017130, 1046437] +processed_samples 17100 unjoint_samples 17100 joint_samples 52 [1017130, 1046437] +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x56387b718480] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed7529700] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x56387b881a80] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref 
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [841843, 1045241]
+processed_samples 17200 unjoint_samples 17200 joint_samples 53 [429266, 1031029]
+processed_samples 17200 unjoint_samples 17200 joint_samples 50 [1048202, 698797]
+processed_samples 17200 unjoint_samples 17200 joint_samples 53 [1021963, 283837]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [1044718, 567648]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [1046585, 524332]
+processed_samples 17200 unjoint_samples 17200 joint_samples 50 [919439, 1046968]
+processed_samples 17200 unjoint_samples 17200 joint_samples 51 [632840, 1040365]
+processed_samples 17300 unjoint_samples 17300 joint_samples 52 [13397, 1047504]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [118088, 1039721]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [1044718, 917518]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [1046300, 107878]
+processed_samples 17300 unjoint_samples 17300 joint_samples 53 [1021963, 671875]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [1046585, 830621]
+processed_samples 17300 unjoint_samples 17300 joint_samples 53 [705878, 1031029]
+processed_samples 17300 unjoint_samples 17300 joint_samples 51 [972859, 1040365]
+processed_samples 17400 unjoint_samples 17400 joint_samples 54 [1035820, 57266]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [1047004, 173074]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [69013, 1046541]
+processed_samples 17400 unjoint_samples 17400 joint_samples 51 [1046300, 423002]
+processed_samples 17400 unjoint_samples 17400 joint_samples 51 [568713, 1039721]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [296404, 1047504]
+processed_samples 17400 unjoint_samples 17400 joint_samples 52 [243823, 1043110]
+processed_samples 17400 unjoint_samples 17400 joint_samples 53 [1030915, 1031533]
+processed_samples 17500 unjoint_samples 17500 joint_samples 51 [881458, 1039721]
+processed_samples 17500 unjoint_samples 17500 joint_samples 54 [1035820, 508477]
+processed_samples 17500 unjoint_samples 17500 joint_samples 54 [1042829, 302234]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [456777, 1046541]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [1047004, 648958]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [658321, 1047504]
+processed_samples 17500 unjoint_samples 17500 joint_samples 51 [1046300, 713256]
+processed_samples 17500 unjoint_samples 17500 joint_samples 52 [563040, 1043110]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [199898, 1047003]
+processed_samples 17600 unjoint_samples 17600 joint_samples 54 [1035820, 874599]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [810395, 1043110]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [768800, 1046541]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [946938, 1047504]
+processed_samples 17600 unjoint_samples 17600 joint_samples 54 [1042829, 576859]
+processed_samples 17600 unjoint_samples 17600 joint_samples 52 [1047004, 1044332]
+processed_samples 17600 unjoint_samples 17600 joint_samples 51 [1046300, 1018331]
+processed_samples 17700 unjoint_samples 17700 joint_samples 52 [315442, 1039358]
+processed_samples 17700 unjoint_samples 17700 joint_samples 55 [172518, 1041811]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [285421, 1046813]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [106320, 1046599]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [1004807, 288456]
+processed_samples 17700 unjoint_samples 17700 joint_samples 52 [531614, 1047003]
+processed_samples 17700 unjoint_samples 17700 joint_samples 53 [1046390, 130823]
+processed_samples 17700 unjoint_samples 17700 joint_samples 54 [1042829, 918161]
+processed_samples 17800 unjoint_samples 17800 joint_samples 55 [448028, 1041811]
+processed_samples 17800 unjoint_samples 17800 joint_samples 55 [173639, 1047438]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [1004807, 618617]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [658731, 1046813]
+processed_samples 17800 unjoint_samples 17800 joint_samples 52 [722482, 1039358]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [383872, 1046599]
+processed_samples 17800 unjoint_samples 17800 joint_samples 52 [802236, 1047003]
+processed_samples 17800 unjoint_samples 17800 joint_samples 53 [1046390, 429052]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [993408, 127127]
+processed_samples 17900 unjoint_samples 17900 joint_samples 55 [428825, 1047438]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [718557, 1046599]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [967547, 1046813]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [1046390, 831802]
+processed_samples 17900 unjoint_samples 17900 joint_samples 53 [1004807, 920499]
+processed_samples 17900 unjoint_samples 17900 joint_samples 52 [982260, 1039358]
+processed_samples 17900 unjoint_samples 17900 joint_samples 55 [790984, 1041811]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [1038354, 282085]
+processed_samples 18000 unjoint_samples 18000 joint_samples 53 [993408, 428527]
+processed_samples 18000 unjoint_samples 18000 joint_samples 56 [1048416, 15695]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [1046390, 59145]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [214114, 1021950]
+processed_samples 18000 unjoint_samples 18000 joint_samples 53 [199371, 1046419]
+processed_samples 18000 unjoint_samples 18000 joint_samples 54 [1010469, 142439]
+processed_samples 18000 unjoint_samples 18000 joint_samples 55 [715630, 1047438]
+processed_samples 18100 unjoint_samples 18100 joint_samples 56 [1048416, 362814]
+processed_samples 18100 unjoint_samples 18100 joint_samples 53 [446208, 1046419]
+processed_samples 18100 unjoint_samples 18100 joint_samples 56 [978310, 175954]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [1046390, 508774]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [1038354, 515661]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [1010469, 420939]
+processed_samples 18100 unjoint_samples 18100 joint_samples 54 [623896, 1021950]
+processed_samples 18100 unjoint_samples 18100 joint_samples 53 [993408, 822157]
+processed_samples 18200 unjoint_samples 18200 joint_samples 56 [1048416, 749966]
+processed_samples 18200 unjoint_samples 18200 joint_samples 54 [1031685, 1027630]
+processed_samples 18200 unjoint_samples 18200 joint_samples 54 [32279, 1026048]
+processed_samples 18200 unjoint_samples 18200 joint_samples 53 [887274, 1046419]
+processed_samples 18200 unjoint_samples 18200 joint_samples 54 [1038354, 801250]
+processed_samples 18200 unjoint_samples 18200 joint_samples 54 [1046390, 814124]
+processed_samples 18200 unjoint_samples 18200 joint_samples 54 [1010469, 652680]
+processed_samples 18200 unjoint_samples 18200 joint_samples 56 [978310, 584943]
0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x563878d0bec0] mmco: unref short failure +[h264 @ 0x563878d0bec0] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [22939, 1047294] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [22939, 1047294] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [43184, 1046658] +processed_samples 18300 unjoint_samples 18300 joint_samples 54 [1043162, 147292] +processed_samples 18300 
unjoint_samples 18300 joint_samples 55 [1038128, 402874] +processed_samples 18300 unjoint_samples 18300 joint_samples 54 [305856, 1026048] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [43184, 1046658] +processed_samples 18300 unjoint_samples 18300 joint_samples 54 [1043162, 147292] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [1038128, 402874] +processed_samples 18300 unjoint_samples 18300 joint_samples 54 [305856, 1026048] +processed_samples 18300 unjoint_samples 18300 joint_samples 57 [104574, 1048033] +processed_samples 18300 unjoint_samples 18300 joint_samples 57 [104574, 1048033] +processed_samples 18300 unjoint_samples 18300 joint_samples 56 [978310, 890705] +processed_samples 18300 unjoint_samples 18300 joint_samples 56 [978310, 890705] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [1023779, 30936] +processed_samples 18300 unjoint_samples 18300 joint_samples 55 [1023779, 30936] +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563878d0bec0] mmco: unref short failure +[h264 @ 0x563878d0bec0] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563878d0bec0] mmco: unref short failure +[h264 @ 0x563878d0bec0] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed403c880] mmco: unref short failure 
+[h264 @ 0x55eed403c880] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x56387d4ad200] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x563875bb8440] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +[h264 @ 0x5638762112c0] mmco: unref short failure +processed_samples 18400 unjoint_samples 18400 joint_samples 57 [347707, 1048033] +processed_samples 18400 unjoint_samples 18400 joint_samples 57 [347707, 1048033] +processed_samples 18400 unjoint_samples 18400 joint_samples 55 [376703, 1046658] +processed_samples 18400 unjoint_samples 18400 joint_samples 55 [376703, 1046658] +processed_samples 18400 unjoint_samples 18400 joint_samples 55 [1038128, 795061] +processed_samples 18400 unjoint_samples 18400 joint_samples 55 [1038128, 795061] +processed_samples 18400 unjoint_samples 18400 joint_samples 57 [189939, 1006045] +processed_samples 18400 unjoint_samples 18400 joint_samples 57 [189939, 
1006045] +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed2f56000] mmco: unref short failure +processed_samples 18400 unjoint_samples 18400 joint_samples 54 [1043162, 491103] +processed_samples 18400 unjoint_samples 18400 joint_samples 54 [1043162, 491103] +processed_samples 18400 unjoint_samples 18400 joint_samples 55 [294745, 1047294] +processed_samples 18400 unjoint_samples 18400 joint_samples 55 [294745, 1047294] +processed_samples 18400 unjoint_samples 18400 joint_samples 54 [686654, 1026048] +processed_samples 18400 unjoint_samples 18400 joint_samples 55 [1023779, 418117] +processed_samples 18400 unjoint_samples 18400 joint_samples 54 [686654, 1026048] +processed_samples 18400 unjoint_samples 18400 joint_samples 55 [1023779, 418117] +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed448ff80] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed448ff80] mmco: unref short failure +[h264 @ 0x55eed448ff80] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed448ff80] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x56386988a480] mmco: unref short failure +[h264 @ 0x55eed4063e00] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x563878d0bec0] mmco: unref short failure +[h264 @ 0x563878d0bec0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x563878ceacc0] mmco: unref short failure +[h264 @ 0x563878ceacc0] mmco: unref short failure +[h264 @ 0x55eed77a6380] mmco: unref short failure +[h264 @ 0x55eed77a6380] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x563874a940c0] mmco: 
unref short failure +[h264 @ 0x563874a940c0] mmco: unref short failure +[h264 @ 0x55eed77a6380] mmco: unref short failure +[h264 @ 0x55eed77a6380] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +processed_samples 18500 unjoint_samples 18500 joint_samples 56 [1048135, 66723] +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +processed_samples 18500 unjoint_samples 18500 joint_samples 55 [617941, 1047294] +processed_samples 18500 unjoint_samples 18500 joint_samples 57 [659592, 1048033] +processed_samples 18500 unjoint_samples 18500 joint_samples 57 [554366, 1006045] +processed_samples 18500 unjoint_samples 18500 joint_samples 54 [1043162, 813479] +processed_samples 18500 unjoint_samples 18500 joint_samples 55 [657645, 1046658] +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +processed_samples 18500 unjoint_samples 18500 joint_samples 55 [1023779, 691239] +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +processed_samples 18500 unjoint_samples 18500 joint_samples 54 [1031719, 1033959] +processed_samples 18500 unjoint_samples 18500 joint_samples 56 [1048135, 66723] +processed_samples 18500 unjoint_samples 18500 joint_samples 55 [617941, 1047294] +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +processed_samples 18500 unjoint_samples 18500 joint_samples 57 [554366, 1006045] +processed_samples 18500 unjoint_samples 18500 joint_samples 57 [659592, 1048033] +processed_samples 18500 unjoint_samples 18500 joint_samples 55 [657645, 1046658] +processed_samples 18500 unjoint_samples 18500 joint_samples 54 [1043162, 813479] +[h264 @ 0x5638755e4b80] mmco: unref short failure +processed_samples 18500 unjoint_samples 18500 joint_samples 55 [1023779, 691239] +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +processed_samples 18500 unjoint_samples 18500 joint_samples 54 [1031719, 1033959] +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed42c2940] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x563875181640] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563878d3f3c0] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x56387b2c7140] 
mmco: unref short failure +[h264 @ 0x56387b2c7140] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x563875f3df00] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x563874943900] mmco: unref short failure +[h264 @ 0x563874943900] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +processed_samples 18600 unjoint_samples 18600 joint_samples 57 [938188, 1048033] +processed_samples 18600 unjoint_samples 18600 joint_samples 55 [1038512, 238740] +processed_samples 18600 unjoint_samples 18600 joint_samples 56 [1048135, 269664] +processed_samples 18600 unjoint_samples 18600 joint_samples 57 [938188, 1048033] +processed_samples 18600 unjoint_samples 18600 joint_samples 55 [1038512, 238740] +processed_samples 18600 unjoint_samples 18600 joint_samples 56 [1048135, 269664] +processed_samples 18600 unjoint_samples 18600 joint_samples 54 [1045632, 1045923] +processed_samples 18600 unjoint_samples 18600 joint_samples 55 [954925, 1047294] +processed_samples 18600 unjoint_samples 18600 joint_samples 55 [1023779, 960286] +processed_samples 18600 unjoint_samples 18600 joint_samples 55 [1024426, 1046658] +processed_samples 18600 unjoint_samples 18600 joint_samples 57 [873678, 1006045] +processed_samples 18600 unjoint_samples 18600 joint_samples 54 [1045632, 1045923] +[h264 @ 0x5638786e5a40] mmco: unref short 
failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +processed_samples 18600 unjoint_samples 18600 joint_samples 55 [954925, 1047294] +processed_samples 18600 unjoint_samples 18600 joint_samples 55 [1023779, 960286] +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +processed_samples 18600 unjoint_samples 18600 joint_samples 55 [1024426, 1046658] +processed_samples 18600 unjoint_samples 18600 joint_samples 57 [873678, 1006045] +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed3a8aa40] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed403c880] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563875005cc0] mmco: unref short failure +[h264 @ 0x563875e2ca80] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure 
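[editor's aside, not part of the captured log] The recurring "processed_samples N unjoint_samples N joint_samples M [a, b]" lines above are the data loader's progress counters. As a minimal sketch only (the field meanings are inferred from the line format, and the log file name is the one this run writes to; nothing here is part of the training code), they can be pulled out of a node log for quick monitoring:

import re
from collections import defaultdict

# Illustrative helper: extract "processed_samples ... joint_samples N [a, b]"
# counters from a node log. Field semantics are assumed from the line format.
LINE_RE = re.compile(
    r"processed_samples (\d+) unjoint_samples (\d+) "
    r"joint_samples (\d+) \[(\d+), (\d+)\]"
)

def parse_progress(path):
    records = []
    with open(path, "r", errors="ignore") as f:
        for line in f:
            m = LINE_RE.search(line)
            if m:
                records.append(tuple(int(g) for g in m.groups()))
    return records

if __name__ == "__main__":
    # Path is illustrative; substitute the node log being inspected.
    latest = defaultdict(tuple)
    for processed, unjoint, joint, a, b in parse_progress("log_node13.txt"):
        latest[joint] = (processed, a, b)
    for joint, (processed, a, b) in sorted(latest.items()):
        print(f"joint_samples={joint} processed={processed} offsets=[{a}, {b}]")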
+[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x56387477ee00] mmco: unref short failure +[h264 @ 0x56387477ee00] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x563878d1b280] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x55eed310b440] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +[h264 @ 0x55eed7772b40] mmco: unref short failure +processed_samples 18700 unjoint_samples 18700 joint_samples 58 [1045663, 86484] +processed_samples 18700 unjoint_samples 18700 joint_samples 58 [1045663, 86484] +processed_samples 18700 unjoint_samples 18700 joint_samples 58 [1046287, 215646] +processed_samples 18700 unjoint_samples 18700 joint_samples 58 [1046287, 215646] +processed_samples 18700 unjoint_samples 18700 joint_samples 56 [176207, 1047294] +processed_samples 18700 unjoint_samples 18700 joint_samples 56 [176207, 1047294] +processed_samples 18700 unjoint_samples 18700 joint_samples 56 [376122, 1011058] +processed_samples 18700 unjoint_samples 18700 joint_samples 56 [376122, 1011058] +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +processed_samples 18700 unjoint_samples 18700 joint_samples 56 [1025403, 354913] +processed_samples 18700 unjoint_samples 18700 joint_samples 56 
[1025403, 354913] +processed_samples 18700 unjoint_samples 18700 joint_samples 55 [1038512, 712697] +processed_samples 18700 unjoint_samples 18700 joint_samples 56 [1048135, 508605] +processed_samples 18700 unjoint_samples 18700 joint_samples 55 [1038512, 712697] +processed_samples 18700 unjoint_samples 18700 joint_samples 56 [1048135, 508605] +processed_samples 18700 unjoint_samples 18700 joint_samples 55 [1047679, 442788] +processed_samples 18700 unjoint_samples 18700 joint_samples 55 [1047679, 442788] +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed326b240] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x56387b3eca00] mmco: unref short failure +[h264 @ 0x56387b3eca00] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x56387b3eca00] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed2f56000] mmco: unref short failure +[h264 @ 0x55eed2f56000] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x5638751ede00] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x563878d7e780] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875069800] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: 
unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed448ff80] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +processed_samples 18800 unjoint_samples 18800 joint_samples 58 [1046287, 488734] +[h264 @ 0x563875db3980] mmco: unref short failure +processed_samples 18800 unjoint_samples 18800 joint_samples 58 [1045663, 374974] +processed_samples 18800 unjoint_samples 18800 joint_samples 55 [1038512, 947507] +processed_samples 18800 unjoint_samples 18800 joint_samples 56 [1025403, 829823] +processed_samples 18800 unjoint_samples 18800 joint_samples 56 [662026, 1011058] +processed_samples 18800 unjoint_samples 18800 joint_samples 56 [1048135, 895967] +processed_samples 18800 unjoint_samples 18800 joint_samples 55 [1047679, 739440] +processed_samples 18800 unjoint_samples 18800 joint_samples 56 [533365, 1047294] +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +processed_samples 18800 unjoint_samples 18800 joint_samples 58 [1046287, 488734] +[h264 @ 0x55eed5d6f840] mmco: unref short failure +processed_samples 18800 unjoint_samples 18800 joint_samples 55 [1038512, 947507] +processed_samples 18800 unjoint_samples 18800 joint_samples 58 [1045663, 374974] +processed_samples 18800 unjoint_samples 18800 joint_samples 56 [1048135, 895967] +processed_samples 18800 unjoint_samples 18800 joint_samples 56 [1025403, 829823] +processed_samples 18800 unjoint_samples 18800 joint_samples 56 [533365, 1047294] +processed_samples 18800 unjoint_samples 18800 joint_samples 56 [662026, 1011058] +processed_samples 18800 unjoint_samples 18800 joint_samples 55 [1047679, 739440] +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] 
mmco: unref short failure +[h264 @ 0x5638794f17c0] mmco: unref short failure +[h264 @ 0x55eed7758d00] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed43712c0] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x563878efe680] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x5638796d2080] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x55eed3ea3e40] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x5638785a6400] mmco: unref short failure +[h264 @ 0x55eed557f600] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed7ecc180] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed4db2680] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x5638760c6000] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 
0x563875414600] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +processed_samples 18900 unjoint_samples 18900 joint_samples 56 [1048135, 318172] +[h264 @ 0x55eed787a100] mmco: unref short failure +processed_samples 18900 unjoint_samples 18900 joint_samples 56 [1048135, 318172] +[h264 @ 0x5638786e5a40] mmco: unref short failure +processed_samples 18900 unjoint_samples 18900 joint_samples 57 [242532, 1018923] +processed_samples 18900 unjoint_samples 18900 joint_samples 57 [242532, 1018923] +processed_samples 18900 unjoint_samples 18900 joint_samples 57 [115354, 1022580] +processed_samples 18900 unjoint_samples 18900 joint_samples 57 [247483, 1019460] +processed_samples 18900 unjoint_samples 18900 joint_samples 57 [247483, 1019460] +processed_samples 18900 unjoint_samples 18900 joint_samples 55 [1047679, 1021203] +processed_samples 18900 unjoint_samples 18900 joint_samples 56 [873304, 1047294] +processed_samples 18900 unjoint_samples 18900 joint_samples 57 [115354, 1022580] +processed_samples 18900 unjoint_samples 18900 joint_samples 56 [873304, 1047294] +processed_samples 18900 unjoint_samples 18900 joint_samples 55 [1047679, 1021203] +processed_samples 18900 unjoint_samples 18900 joint_samples 58 [1046287, 754560] +processed_samples 18900 unjoint_samples 18900 joint_samples 58 [1046287, 754560] +processed_samples 18900 unjoint_samples 18900 joint_samples 58 [1045663, 733965] +[h264 @ 0x55eed4373ac0] mmco: unref short failure +processed_samples 18900 unjoint_samples 18900 joint_samples 58 [1045663, 733965] +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x5638786aa400] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure 
+[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x56387885e400] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x55eed7bb3200] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875218ac0] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563875706fc0] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x55eed3f1cb80] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x563875c10e80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed6b8a040] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +processed_samples 19000 unjoint_samples 19000 joint_samples 59 [69666, 1047655] +processed_samples 19000 unjoint_samples 19000 
joint_samples 59 [69666, 1047655] +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +processed_samples 19000 unjoint_samples 19000 joint_samples 59 [65195, 1036219] +processed_samples 19000 unjoint_samples 19000 joint_samples 59 [65195, 1036219] +processed_samples 19000 unjoint_samples 19000 joint_samples 57 [1046652, 194393] +processed_samples 19000 unjoint_samples 19000 joint_samples 57 [1046652, 194393] +processed_samples 19000 unjoint_samples 19000 joint_samples 56 [217567, 1047487] +processed_samples 19000 unjoint_samples 19000 joint_samples 56 [217567, 1047487] +processed_samples 19000 unjoint_samples 19000 joint_samples 56 [1048135, 588407] +processed_samples 19000 unjoint_samples 19000 joint_samples 56 [1048135, 588407] +processed_samples 19000 unjoint_samples 19000 joint_samples 57 [620865, 1019460] +processed_samples 19000 unjoint_samples 19000 joint_samples 57 [620865, 1019460] +processed_samples 19000 unjoint_samples 19000 joint_samples 57 [595066, 1018923] +processed_samples 19000 unjoint_samples 19000 joint_samples 57 [595066, 1018923] +processed_samples 19000 unjoint_samples 19000 joint_samples 57 [409542, 1022580] +[h264 @ 0x5638760c6000] mmco: unref short failure +processed_samples 19000 unjoint_samples 19000 joint_samples 57 [409542, 1022580] +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed411f180] mmco: unref short failure +[h264 @ 0x55eed4b87140] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x55eed574fb80] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed77a4340] 
mmco: unref short failure +[h264 @ 0x55eed77a4340] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x563875474080] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x5638787dbdc0] mmco: unref short failure +[h264 @ 0x56387530bc40] mmco: unref short failure +[h264 @ 0x56387530bc40] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x55eed4382780] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed4c1a2c0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x5638750176c0] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +processed_samples 19100 unjoint_samples 19100 joint_samples 59 [388903, 1047655] +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +[h264 @ 0x5638786f4700] mmco: unref short failure +processed_samples 19100 unjoint_samples 19100 joint_samples 59 [525605, 1036219] +processed_samples 19100 unjoint_samples 19100 joint_samples 57 [1046652, 508144] +processed_samples 19100 unjoint_samples 19100 joint_samples 56 [461452, 1047487] +processed_samples 19100 unjoint_samples 19100 joint_samples 57 [735113, 1022580] +processed_samples 19100 unjoint_samples 19100 joint_samples 56 [1048135, 931547] +processed_samples 19100 unjoint_samples 19100 joint_samples 57 [918073, 1019460] +processed_samples 19100 unjoint_samples 19100 joint_samples 57 [869837, 1018923] +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +processed_samples 19100 unjoint_samples 19100 joint_samples 59 [388903, 1047655] +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +[h264 @ 0x55eed2f881c0] mmco: unref short failure +processed_samples 19100 unjoint_samples 19100 joint_samples 59 [525605, 1036219] +processed_samples 19100 unjoint_samples 19100 joint_samples 56 [461452, 1047487] +processed_samples 19100 unjoint_samples 19100 joint_samples 57 [1046652, 508144] +processed_samples 19100 unjoint_samples 
19100 joint_samples 57 [735113, 1022580] +processed_samples 19100 unjoint_samples 19100 joint_samples 56 [1048135, 931547] +processed_samples 19100 unjoint_samples 19100 joint_samples 57 [918073, 1019460] +[h264 @ 0x563874824740] mmco: unref short failure +[h264 @ 0x563874824740] mmco: unref short failure +processed_samples 19100 unjoint_samples 19100 joint_samples 57 [869837, 1018923] +[h264 @ 0x56387477ee00] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4e5f300] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x55eed5bee740] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x563878fb9640] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x563878d42fc0] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed7ec7980] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed809ad00] mmco: unref short failure +[h264 @ 0x55eed4c79700] mmco: unref short failure +[h264 @ 0x55eed4c79700] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x563878ceacc0] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x5638746f4540] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x55eed3999180] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure +[h264 @ 0x55eed3df04c0] mmco: unref short failure 
+[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x563875864b80] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x5638750f6f80] mmco: unref short failure +[h264 @ 0x55eed5a8f800] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed452d680] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x563875154ec0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x563878ccba40] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed38f99c0] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x563878ae5380] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +[h264 @ 0x55eed7b510c0] mmco: unref short failure +processed_samples 19200 unjoint_samples 19200 joint_samples 58 [95219, 1040034] +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +processed_samples 19200 unjoint_samples 19200 joint_samples 59 [805255, 1047655] +processed_samples 19200 unjoint_samples 19200 joint_samples 58 [94310, 1042268] +processed_samples 19200 unjoint_samples 19200 joint_samples 57 [332604, 1047889] +processed_samples 19200 unjoint_samples 19200 joint_samples 57 [1046652, 855559] +processed_samples 19200 unjoint_samples 19200 joint_samples 58 [95219, 1040034] +processed_samples 19200 unjoint_samples 19200 joint_samples 57 [1041266, 1042108] +processed_samples 19200 unjoint_samples 19200 joint_samples 57 [332604, 1047889] +processed_samples 19200 unjoint_samples 19200 joint_samples 56 [829349, 1047487] +processed_samples 19200 unjoint_samples 19200 joint_samples 59 [805255, 1047655] +processed_samples 19200 unjoint_samples 19200 joint_samples 58 [94310, 1042268] +processed_samples 19200 unjoint_samples 19200 joint_samples 57 [1046652, 855559] +processed_samples 19200 unjoint_samples 19200 joint_samples 57 [1041266, 1042108] +processed_samples 19200 unjoint_samples 19200 joint_samples 56 [829349, 1047487] +processed_samples 19200 unjoint_samples 19200 joint_samples 60 [793858, 279475] +processed_samples 19200 unjoint_samples 19200 joint_samples 60 [793858, 279475] +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed47b4d40] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed49eb540] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x55eed4bbfdc0] mmco: unref short failure +[h264 @ 0x5638759645c0] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x563874614fc0] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x55eed3fe1600] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x5638786e5a40] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x55eed3717500] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure 
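[editor's aside, not part of the captured log] The "[h264 @ 0x...] mmco: unref short failure" messages are warnings from the FFmpeg h264 decoder used by the video data pipeline; in this run they appear to be non-fatal, since the progress counters keep advancing around them. As a hedged triage sketch only (file name illustrative, not a tool referenced by the log), their volume per decoder context can be tallied like this:

import re
from collections import Counter

# Illustrative triage helper: count h264 "mmco: unref short failure"
# warnings per decoder context address to gauge how widespread they are.
WARN_RE = re.compile(r"\[h264 @ (0x[0-9a-f]+)\] mmco: unref short failure")

def count_h264_warnings(path):
    counts = Counter()
    with open(path, "r", errors="ignore") as f:
        for line in f:
            counts.update(WARN_RE.findall(line))
    return counts

if __name__ == "__main__":
    # Path is illustrative; substitute the node log being inspected.
    for ctx, n in count_h264_warnings("log_node13.txt").most_common(10):
        print(f"{ctx}: {n} warnings")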
+processed_samples 19300 unjoint_samples 19300 joint_samples 60 [1046361, 12013]
+processed_samples 19300 unjoint_samples 19300 joint_samples 57 [1047196, 121254]
+processed_samples 19300 unjoint_samples 19300 joint_samples 58 [66008, 1046927]
+processed_samples 19300 unjoint_samples 19300 joint_samples 57 [642753, 1047889]
+processed_samples 19300 unjoint_samples 19300 joint_samples 58 [375142, 1046772]
+processed_samples 19300 unjoint_samples 19300 joint_samples 58 [402120, 1040034]
+processed_samples 19300 unjoint_samples 19300 joint_samples 58 [385089, 1042268]
+processed_samples 19300 unjoint_samples 19300 joint_samples 60 [793858, 532469]
+[h264 @ 0x...] mmco: unref short failure (repeated many times from multiple decoder instances)
+processed_samples 19400 unjoint_samples 19400 joint_samples 60 [1046361, 324945]
+processed_samples 19400 unjoint_samples 19400 joint_samples 58 [701890, 1046772]
+processed_samples 19400 unjoint_samples 19400 joint_samples 57 [1047196, 406208]
+processed_samples 19400 unjoint_samples 19400 joint_samples 60 [823176, 822254]
+processed_samples 19400 unjoint_samples 19400 joint_samples 57 [953533, 1047889]
+processed_samples 19400 unjoint_samples 19400 joint_samples 58 [417312, 1046927]
+processed_samples 19400 unjoint_samples 19400 joint_samples 58 [654777, 1040034]
+processed_samples 19400 unjoint_samples 19400 joint_samples 58 [776312, 1042268]
+[h264 @ 0x...] mmco: unref short failure (repeated many times from multiple decoder instances)
+processed_samples 19500 unjoint_samples 19500 joint_samples 59 [8047, 1046994]
+processed_samples 19500 unjoint_samples 19500 joint_samples 60 [1046361, 696637]
+processed_samples 19500 unjoint_samples 19500 joint_samples 59 [1037602, 33109]
+processed_samples 19500 unjoint_samples 19500 joint_samples 58 [1047802, 240963]
+processed_samples 19500 unjoint_samples 19500 joint_samples 58 [1033799, 1040034]
+processed_samples 19500 unjoint_samples 19500 joint_samples 58 [762016, 1046927]
+processed_samples 19500 unjoint_samples 19500 joint_samples 60 [959070, 960687]
+processed_samples 19500 unjoint_samples 19500 joint_samples 57 [1047196, 741233]
+[h264 @ 0x...] mmco: unref short failure (repeated many times from multiple decoder instances)
+processed_samples 19600 unjoint_samples 19600 joint_samples 61 [1046361, 57347]
+processed_samples 19600 unjoint_samples 19600 joint_samples 59 [355298, 1046994]
+processed_samples 19600 unjoint_samples 19600 joint_samples 61 [1044844, 107586]
+processed_samples 19600 unjoint_samples 19600 joint_samples 59 [1031772, 39287]
+processed_samples 19600 unjoint_samples 19600 joint_samples 59 [1047775, 351203]
+processed_samples 19600 unjoint_samples 19600 joint_samples 58 [1047196, 43648]
+processed_samples 19600 unjoint_samples 19600 joint_samples 59 [1037602, 421825]
+processed_samples 19600 unjoint_samples 19600 joint_samples 58 [1047802, 649091]
+[h264 @ 0x...] mmco: unref short failure (repeated many times from multiple decoder instances)
+processed_samples 19700 unjoint_samples 19700 joint_samples 59 [1047775, 601597]
+processed_samples 19700 unjoint_samples 19700 joint_samples 59 [1031772, 329417]
+processed_samples 19700 unjoint_samples 19700 joint_samples 58 [1047196, 314883]
+processed_samples 19700 unjoint_samples 19700 joint_samples 61 [1044844, 319580]
+processed_samples 19700 unjoint_samples 19700 joint_samples 59 [664046, 1046994]
+processed_samples 19700 unjoint_samples 19700 joint_samples 61 [1046361, 342623]
+processed_samples 19700 unjoint_samples 19700 joint_samples 58 [1047802, 980861]
+processed_samples 19700 unjoint_samples 19700 joint_samples 59 [1037602, 734050]
+[h264 @ 0x...] mmco: unref short failure (repeated many times from multiple decoder instances)
+processed_samples 19800 unjoint_samples 19800 joint_samples 58 [1047196, 558029]
+processed_samples 19800 unjoint_samples 19800 joint_samples 59 [958620, 1046994]
+processed_samples 19800 unjoint_samples 19800 joint_samples 59 [1047802, 254373]
+processed_samples 19800 unjoint_samples 19800 joint_samples 59 [1031772, 687513]
+processed_samples 19800 unjoint_samples 19800 joint_samples 61 [1046361, 672805]
+processed_samples 19800 unjoint_samples 19800 joint_samples 59 [1047775, 925310]
+processed_samples 19800 unjoint_samples 19800 joint_samples 61 [1044844, 679551]
+processed_samples 19800 unjoint_samples 19800 joint_samples 59 [1037602, 1017299]
+[h264 @ 0x...] mmco: unref short failure (repeated many times from multiple decoder instances)
+processed_samples 19900 unjoint_samples 19900 joint_samples 60 [231220, 1028043]
+processed_samples 19900 unjoint_samples 19900 joint_samples 59 [1047802, 488970]
+processed_samples 19900 unjoint_samples 19900 joint_samples 60 [312758, 1047223]
+processed_samples 19900 unjoint_samples 19900 joint_samples 58 [1047196, 1043080]
+processed_samples 19900 unjoint_samples 19900 joint_samples 60 [1046771, 289507]
+processed_samples 19900 unjoint_samples 19900 joint_samples 61 [1046361, 917326]
+processed_samples 19900 unjoint_samples 19900 joint_samples 61 [1044844, 998934]
+processed_samples 19900 unjoint_samples 19900 joint_samples 59 [1031772, 943170]
+[h264 @ 0x...] mmco: unref short failure (repeated many times from multiple decoder instances)
+processed_samples 20000 unjoint_samples 20000 joint_samples 60 [480487, 1028043]
+processed_samples 20000 unjoint_samples 20000 joint_samples 62 [236811, 1037812]
+processed_samples 20000 unjoint_samples 20000 joint_samples 60 [562926, 1047223]
+processed_samples 20000 unjoint_samples 20000 joint_samples 59 [320099, 1046727]
+processed_samples 20000 unjoint_samples 20000 joint_samples 62 [294792, 1028152]
+processed_samples 20000 unjoint_samples 20000 joint_samples 60 [1039492, 341026]
+processed_samples 20000 unjoint_samples 20000 joint_samples 60 [1046771, 539341]
+processed_samples 20000 unjoint_samples 20000 joint_samples 59 [1047802, 808499]
+[h264 @ 0x...] mmco: unref short failure (repeated many times from multiple decoder instances)
+processed_samples 20100 unjoint_samples 20100 joint_samples 62 [520546, 1037812]
+processed_samples 20100 unjoint_samples 20100 joint_samples 60 [63217, 1046109]
+processed_samples 20100 unjoint_samples 20100 joint_samples 60 [873953, 1028043]
+processed_samples 20100 unjoint_samples 20100 joint_samples 59 [613706, 1046727]
+processed_samples 20100 unjoint_samples 20100 joint_samples 62 [562637, 1028152]
+processed_samples 20100 unjoint_samples 20100 joint_samples 60 [1039492, 615899]
+processed_samples 20100 unjoint_samples 20100 joint_samples 60 [1046771, 851271]
+processed_samples 20100 unjoint_samples 20100 joint_samples 61 [2120, 1047223]
+[h264 @ 0x...] mmco: unref short failure (repeated many times from multiple decoder instances)
0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed3d6d600] mmco: unref short failure +[h264 @ 0x55eed3f1a240] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x55eed34e1c40] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x56387d28b7c0] mmco: unref short failure +[h264 @ 0x55eed43712c0] mmco: unref short failure +[h264 @ 0x55eed43712c0] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x55eed4083400] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x563878b24c80] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x56387996a9c0] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x563874b9d200] mmco: unref short failure +[h264 @ 0x55eed5d6f840] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x55eed3501780] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x5638765f0700] mmco: unref short failure +[h264 @ 0x55eed30e43c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +processed_samples 20200 unjoint_samples 20200 joint_samples 61 [1036894, 87851] +processed_samples 20200 unjoint_samples 20200 joint_samples 61 [350474, 1047223] +processed_samples 20200 unjoint_samples 20200 joint_samples 61 [1036894, 87851] +processed_samples 20200 unjoint_samples 20200 joint_samples 61 [350474, 1047223] +processed_samples 20200 unjoint_samples 20200 joint_samples 59 [924895, 1046727] +processed_samples 20200 unjoint_samples 20200 joint_samples 61 [242850, 1018196] +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [328452, 1046109] +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [328452, 1046109] +processed_samples 20200 unjoint_samples 20200 joint_samples 62 [808133, 1037812] +processed_samples 20200 unjoint_samples 20200 joint_samples 61 [242850, 1018196] +processed_samples 20200 unjoint_samples 20200 joint_samples 59 [924895, 1046727] +processed_samples 20200 unjoint_samples 20200 joint_samples 62 [808133, 1037812] +processed_samples 20200 unjoint_samples 20200 joint_samples 62 [797694, 1028152] +processed_samples 20200 unjoint_samples 20200 joint_samples 62 [797694, 1028152] +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [1039492, 895268] +processed_samples 20200 unjoint_samples 20200 joint_samples 60 [1039492, 895268] +[h264 @ 0x563874c67c00] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x563875e2d840] mmco: unref short failure +[h264 @ 0x55eed4e24280] mmco: unref short failure +[h264 @ 0x563875487640] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x563875ebb3c0] mmco: unref short failure +[h264 @ 0x55eed50f6040] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 
0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x563875f41340] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x56387473c8c0] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x55eed36c4540] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x5638767b3100] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed425b900] mmco: unref short failure +[h264 @ 0x55eed3e54b40] mmco: unref short failure +[h264 @ 0x55eed3e54b40] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x55eed3a89540] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563875231d40] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x563877ae5940] mmco: unref short failure +[h264 @ 0x55eed5011540] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed4373ac0] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed42f3280] mmco: unref short failure +[h264 @ 0x55eed3027c40] mmco: unref short failure +[h264 @ 0x563876420f40] mmco: unref short failure +[h264 @ 0x55eed3d3db00] mmco: unref short failure +[h264 @ 0x563875ed27c0] mmco: unref short failure +[h264 @ 0x55eed42e82c0] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed4575800] mmco: unref short failure +[h264 @ 0x55eed8007e80] mmco: unref short failure +[h264 @ 0x563878e5e780] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x5638748eb3c0] mmco: unref short failure +[h264 @ 0x55eed787a100] mmco: unref short failure +[h264 @ 0x563874943900] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x56387665bfc0] mmco: unref short failure +[h264 @ 0x55eed40f2ac0] mmco: unref short failure +[h264 @ 0x563875db3980] mmco: unref short failure +[h264 @ 0x55eed3a5e240] [h264 @ 0x563875db3980] mmco: unref short failure +mmco: unref short failure +[h264 @ 0x55eed3a5e240] mmco: unref short failure +[h264 @ 0x55eed4bbb980] mmco: unref short failure +[h264 @ 0x5638786646c0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x55eed4edcac0] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x5638755e4b80] mmco: unref short failure +[h264 @ 0x55eed37ad140] mmco: unref short failure +[h264 @ 0x5638791f6500] mmco: unref short failure +[h264 @ 0x56387562de80] mmco: unref short 
+processed_samples 20300 unjoint_samples 20300 joint_samples 63 [136928, 1046452]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [1042005, 145608]
+processed_samples 20300 unjoint_samples 20300 joint_samples 60 [1045250, 196955]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [1036894, 439261]
+processed_samples 20300 unjoint_samples 20300 joint_samples 60 [616109, 1046109]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [683630, 1047223]
+processed_samples 20300 unjoint_samples 20300 joint_samples 63 [136928, 1046452]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [1042005, 145608]
+processed_samples 20300 unjoint_samples 20300 joint_samples 60 [1045250, 196955]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [1036894, 439261]
+processed_samples 20300 unjoint_samples 20300 joint_samples 60 [616109, 1046109]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [683630, 1047223]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [598945, 1018196]
+processed_samples 20300 unjoint_samples 20300 joint_samples 61 [598945, 1018196]
+processed_samples 20300 unjoint_samples 20300 joint_samples 63 [23893, 1041815]
+processed_samples 20300 unjoint_samples 20300 joint_samples 63 [23893, 1041815]
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 20400 unjoint_samples 20400 joint_samples 63 [602437, 1046452]
+processed_samples 20400 unjoint_samples 20400 joint_samples 61 [1042005, 562339]
+processed_samples 20400 unjoint_samples 20400 joint_samples 63 [272606, 1041815]
+processed_samples 20400 unjoint_samples 20400 joint_samples 63 [602437, 1046452]
+processed_samples 20400 unjoint_samples 20400 joint_samples 61 [1042005, 562339]
+processed_samples 20400 unjoint_samples 20400 joint_samples 61 [1036894, 795113]
+processed_samples 20400 unjoint_samples 20400 joint_samples 63 [272606, 1041815]
+processed_samples 20400 unjoint_samples 20400 joint_samples 60 [1045250, 569521]
+processed_samples 20400 unjoint_samples 20400 joint_samples 61 [940997, 1047223]
+processed_samples 20400 unjoint_samples 20400 joint_samples 60 [1045250, 569521]
+processed_samples 20400 unjoint_samples 20400 joint_samples 61 [1036894, 795113]
+processed_samples 20400 unjoint_samples 20400 joint_samples 61 [940997, 1047223]
+processed_samples 20400 unjoint_samples 20400 joint_samples 61 [932469, 1018196]
+processed_samples 20400 unjoint_samples 20400 joint_samples 61 [932469, 1018196]
+processed_samples 20400 unjoint_samples 20400 joint_samples 60 [1040059, 1046109]
+processed_samples 20400 unjoint_samples 20400 joint_samples 60 [1040059, 1046109]
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [273568, 1047223]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [1046114, 173222]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [216919, 1046654]
+processed_samples 20500 unjoint_samples 20500 joint_samples 61 [1041644, 321517]
+processed_samples 20500 unjoint_samples 20500 joint_samples 63 [596469, 1041815]
+processed_samples 20500 unjoint_samples 20500 joint_samples 60 [1045250, 854687]
+processed_samples 20500 unjoint_samples 20500 joint_samples 63 [950820, 1046452]
+processed_samples 20500 unjoint_samples 20500 joint_samples 61 [1042005, 969953]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [1046114, 173222]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [273568, 1047223]
+processed_samples 20500 unjoint_samples 20500 joint_samples 62 [216919, 1046654]
+processed_samples 20500 unjoint_samples 20500 joint_samples 61 [1041644, 321517]
+processed_samples 20500 unjoint_samples 20500 joint_samples 63 [596469, 1041815]
+processed_samples 20500 unjoint_samples 20500 joint_samples 60 [1045250, 854687]
+processed_samples 20500 unjoint_samples 20500 joint_samples 61 [1042005, 969953]
+processed_samples 20500 unjoint_samples 20500 joint_samples 63 [950820, 1046452]
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [1046294, 135029]
+processed_samples 20600 unjoint_samples 20600 joint_samples 64 [147219, 1046452]
+processed_samples 20600 unjoint_samples 20600 joint_samples 61 [296896, 989697]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [1046114, 487440]
+processed_samples 20600 unjoint_samples 20600 joint_samples 64 [147219, 1046452]
+processed_samples 20600 unjoint_samples 20600 joint_samples 61 [1041644, 627997]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [1046294, 135029]
+processed_samples 20600 unjoint_samples 20600 joint_samples 61 [296896, 989697]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [1046114, 487440]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [755116, 1047223]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [577435, 1046654]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [577435, 1046654]
+processed_samples 20600 unjoint_samples 20600 joint_samples 62 [755116, 1047223]
+processed_samples 20600 unjoint_samples 20600 joint_samples 63 [875983, 1041815]
+processed_samples 20600 unjoint_samples 20600 joint_samples 61 [1041644, 627997]
+processed_samples 20600 unjoint_samples 20600 joint_samples 63 [875983, 1041815]
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+[h264 @ 0x55eed3fe1600] Invalid NAL unit size (1113487007 > 108878).
+[h264 @ 0x55eed3fe1600] Error splitting the input into NAL units.
+[h264 @ 0x5638785a6400] Invalid NAL unit size (1113487007 > 108878).
+[h264 @ 0x5638785a6400] Error splitting the input into NAL units.
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+processed_samples 20700 unjoint_samples 20700 joint_samples 64 [383932, 1046452]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [1046114, 820265]
+processed_samples 20700 unjoint_samples 20700 joint_samples 61 [633483, 989697]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [1046294, 407317]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [1030613, 1047223]
+processed_samples 20701 unjoint_samples 20700 joint_samples 64 [1044182, 162161]
+processed_samples 20700 unjoint_samples 20700 joint_samples 61 [1041644, 910226]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [898011, 1046654]
+processed_samples 20700 unjoint_samples 20700 joint_samples 64 [383932, 1046452]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [1046114, 820265]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [1030613, 1047223]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [1046294, 407317]
+processed_samples 20700 unjoint_samples 20700 joint_samples 61 [633483, 989697]
+processed_samples 20701 unjoint_samples 20700 joint_samples 64 [1044182, 162161]
+processed_samples 20700 unjoint_samples 20700 joint_samples 62 [898011, 1046654]
+processed_samples 20700 unjoint_samples 20700 joint_samples 61 [1041644, 910226]
[... repeated "[h264 @ 0x...] mmco: unref short failure" decoder warnings omitted ...]
+[2024-12-01 22:44:15,967] torch.distributed.elastic.agent.server.api: [ERROR] Error waiting on exit barrier. Elapsed: 300.051100730896 seconds
++ set +x