Dataset columns: instruction (string, 1 distinct value); input (string, lengths 90 to 139k characters); output (string, lengths 16 to 138k characters); __index_level_0__ (int64, values roughly 165k to 175k).
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: SYSCALL_DEFINE1(inotify_init1, int, flags) { struct fsnotify_group *group; struct user_struct *user; int ret; /* Check the IN_* constants for consistency. */ BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK); if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) return -EINVAL; user = get_current_user(); if (unlikely(atomic_read(&user->inotify_devs) >= inotify_max_user_instances)) { ret = -EMFILE; goto out_free_uid; } /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */ group = inotify_new_group(user, inotify_max_queued_events); if (IS_ERR(group)) { ret = PTR_ERR(group); goto out_free_uid; } atomic_inc(&user->inotify_devs); ret = anon_inode_getfd("inotify", &inotify_fops, group, O_RDONLY | flags); if (ret >= 0) return ret; fsnotify_put_group(group); atomic_dec(&user->inotify_devs); out_free_uid: free_uid(user); return ret; } Commit Message: inotify: fix double free/corruption of stuct user On an error path in inotify_init1 a normal user can trigger a double free of struct user. This is a regression introduced by a2ae4cc9a16e ("inotify: stop kernel memory leak on file creation failure"). We fix this by making sure that if a group exists the user reference is dropped when the group is cleaned up. We should not explictly drop the reference on error and also drop the reference when the group is cleaned up. The new lifetime rules are that an inotify group lives from inotify_new_group to the last fsnotify_put_group. Since the struct user and inotify_devs are directly tied to this lifetime they are only changed/updated in those two locations. We get rid of all special casing of struct user or user->inotify_devs. Signed-off-by: Eric Paris <[email protected]> Cc: [email protected] (2.6.37 and up) Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-399
SYSCALL_DEFINE1(inotify_init1, int, flags) { struct fsnotify_group *group; int ret; /* Check the IN_* constants for consistency. */ BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK); if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) return -EINVAL; /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */ group = inotify_new_group(inotify_max_queued_events); if (IS_ERR(group)) return PTR_ERR(group); ret = anon_inode_getfd("inotify", &inotify_fops, group, O_RDONLY | flags); if (ret < 0) fsnotify_put_group(group); return ret; }
165,887
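Aside (not part of the dataset row): the commit message above describes a double free of struct user on the inotify error path, fixed by giving the reference a single owner. The plain-C sketch below illustrates that ownership pattern with invented names; refcounted_t, take_ref, put_ref and struct owner are hypothetical stand-ins for struct user, get_current_user(), free_uid() and the fsnotify group, not kernel APIs.

/* Minimal sketch, assuming a simple refcount: the error path must not
 * drop the reference itself once ownership has been handed to the
 * object whose teardown drops it. All names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { int refs; } refcounted_t;

static refcounted_t *take_ref(void)
{
	refcounted_t *r = malloc(sizeof(*r));
	if (r)
		r->refs = 1;
	return r;
}

static void put_ref(refcounted_t *r)
{
	if (r && --r->refs == 0)
		free(r);
}

/* Owner object: its teardown is the one place the reference is dropped,
 * mirroring "the group lifetime owns the user reference" in the commit. */
struct owner {
	refcounted_t *user;
};

static void owner_destroy(struct owner *o)
{
	put_ref(o->user);	/* single, well-defined put */
	free(o);
}

static int init_fixed(int simulate_error)
{
	refcounted_t *user = take_ref();
	struct owner *o;

	if (!user)
		return -1;

	o = malloc(sizeof(*o));
	if (!o) {
		put_ref(user);	/* owner never existed, so we still own it */
		return -1;
	}
	o->user = user;		/* ownership transferred to the owner */

	if (simulate_error) {
		/* do NOT put_ref(user) here: owner_destroy already does */
		owner_destroy(o);
		return -1;
	}
	owner_destroy(o);
	return 0;
}

int main(void)
{
	printf("%d %d\n", init_fixed(0), init_fixed(1));
	return 0;
}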
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void BrowserProcessMainImpl::Shutdown() { if (state_ != STATE_STARTED) { CHECK_NE(state_, STATE_SHUTTING_DOWN); return; MessagePump::Get()->Stop(); WebContentsUnloader::GetInstance()->Shutdown(); if (process_model_ != PROCESS_MODEL_SINGLE_PROCESS) { BrowserContext::AssertNoContextsExist(); } browser_main_runner_->Shutdown(); browser_main_runner_.reset(); if (process_model_ != PROCESS_MODEL_SINGLE_PROCESS) { BrowserContext::AssertNoContextsExist(); } browser_main_runner_->Shutdown(); browser_main_runner_.reset(); exit_manager_.reset(); main_delegate_.reset(); platform_delegate_.reset(); state_ = STATE_SHUTDOWN; } BrowserProcessMain::BrowserProcessMain() {} BrowserProcessMain::~BrowserProcessMain() {} ProcessModel BrowserProcessMain::GetProcessModelOverrideFromEnv() { static bool g_initialized = false; static ProcessModel g_process_model = PROCESS_MODEL_UNDEFINED; if (g_initialized) { return g_process_model; } g_initialized = true; std::unique_ptr<base::Environment> env = base::Environment::Create(); if (IsEnvironmentOptionEnabled("SINGLE_PROCESS", env.get())) { g_process_model = PROCESS_MODEL_SINGLE_PROCESS; } else { std::string model = GetEnvironmentOption("PROCESS_MODEL", env.get()); if (!model.empty()) { if (model == "multi-process") { g_process_model = PROCESS_MODEL_MULTI_PROCESS; } else if (model == "single-process") { g_process_model = PROCESS_MODEL_SINGLE_PROCESS; } else if (model == "process-per-site-instance") { g_process_model = PROCESS_MODEL_PROCESS_PER_SITE_INSTANCE; } else if (model == "process-per-view") { g_process_model = PROCESS_MODEL_PROCESS_PER_VIEW; } else if (model == "process-per-site") { g_process_model = PROCESS_MODEL_PROCESS_PER_SITE; } else if (model == "site-per-process") { g_process_model = PROCESS_MODEL_SITE_PER_PROCESS; } else { LOG(WARNING) << "Invalid process mode: " << model; } } } return g_process_model; } BrowserProcessMain* BrowserProcessMain::GetInstance() { static BrowserProcessMainImpl g_instance; return &g_instance; } } // namespace oxide Commit Message: CWE ID: CWE-20
void BrowserProcessMainImpl::Shutdown() { if (state_ != STATE_STARTED) { CHECK_NE(state_, STATE_SHUTTING_DOWN); return; MessagePump::Get()->Stop(); browser_main_runner_->Shutdown(); browser_main_runner_.reset(); if (process_model_ != PROCESS_MODEL_SINGLE_PROCESS) { BrowserContext::AssertNoContextsExist(); } browser_main_runner_->Shutdown(); browser_main_runner_.reset(); exit_manager_.reset(); main_delegate_.reset(); platform_delegate_.reset(); state_ = STATE_SHUTDOWN; } BrowserProcessMain::BrowserProcessMain() {} BrowserProcessMain::~BrowserProcessMain() {} ProcessModel BrowserProcessMain::GetProcessModelOverrideFromEnv() { static bool g_initialized = false; static ProcessModel g_process_model = PROCESS_MODEL_UNDEFINED; if (g_initialized) { return g_process_model; } g_initialized = true; std::unique_ptr<base::Environment> env = base::Environment::Create(); if (IsEnvironmentOptionEnabled("SINGLE_PROCESS", env.get())) { g_process_model = PROCESS_MODEL_SINGLE_PROCESS; } else { std::string model = GetEnvironmentOption("PROCESS_MODEL", env.get()); if (!model.empty()) { if (model == "multi-process") { g_process_model = PROCESS_MODEL_MULTI_PROCESS; } else if (model == "single-process") { g_process_model = PROCESS_MODEL_SINGLE_PROCESS; } else if (model == "process-per-site-instance") { g_process_model = PROCESS_MODEL_PROCESS_PER_SITE_INSTANCE; } else if (model == "process-per-view") { g_process_model = PROCESS_MODEL_PROCESS_PER_VIEW; } else if (model == "process-per-site") { g_process_model = PROCESS_MODEL_PROCESS_PER_SITE; } else if (model == "site-per-process") { g_process_model = PROCESS_MODEL_SITE_PER_PROCESS; } else { LOG(WARNING) << "Invalid process mode: " << model; } } } return g_process_model; } BrowserProcessMain* BrowserProcessMain::GetInstance() { static BrowserProcessMainImpl g_instance; return &g_instance; } } // namespace oxide
165,424
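Aside (not part of the dataset row): this row carries no commit message, but both versions of Shutdown() open with a state guard (state_ != STATE_STARTED plus CHECK_NE on STATE_SHUTTING_DOWN) so teardown runs at most once and never re-enters itself. The C sketch below shows that single-shot, state-guarded shutdown shape with invented names; the enum values and resources are hypothetical, not the Oxide codebase.

/* Sketch of a single-shot, state-guarded shutdown. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

enum state { STATE_STARTED, STATE_SHUTTING_DOWN, STATE_SHUTDOWN };

struct process {
	enum state state;
	char *main_runner;	/* stands in for browser_main_runner_ */
};

static void process_shutdown(struct process *p)
{
	if (p->state != STATE_STARTED) {
		/* Re-entering while already tearing down would be a bug. */
		assert(p->state != STATE_SHUTTING_DOWN);
		return;
	}
	p->state = STATE_SHUTTING_DOWN;

	free(p->main_runner);	/* tear each resource down exactly once */
	p->main_runner = NULL;

	p->state = STATE_SHUTDOWN;
}

int main(void)
{
	struct process p = { STATE_STARTED, malloc(16) };

	process_shutdown(&p);
	process_shutdown(&p);	/* second call is a harmless no-op */
	printf("state=%d\n", p.state);
	return 0;
}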
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; struct bpf_insn *insns = env->prog->insnsi; struct bpf_reg_state *regs; int insn_cnt = env->prog->len; int insn_idx, prev_insn_idx = 0; int insn_processed = 0; bool do_print_state = false; state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); if (!state) return -ENOMEM; env->cur_state = state; init_reg_state(env, state->regs); state->parent = NULL; insn_idx = 0; for (;;) { struct bpf_insn *insn; u8 class; int err; if (insn_idx >= insn_cnt) { verbose(env, "invalid insn idx %d insn_cnt %d\n", insn_idx, insn_cnt); return -EFAULT; } insn = &insns[insn_idx]; class = BPF_CLASS(insn->code); if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { verbose(env, "BPF program is too large. Processed %d insn\n", insn_processed); return -E2BIG; } err = is_state_visited(env, insn_idx); if (err < 0) return err; if (err == 1) { /* found equivalent state, can prune the search */ if (env->log.level) { if (do_print_state) verbose(env, "\nfrom %d to %d: safe\n", prev_insn_idx, insn_idx); else verbose(env, "%d: safe\n", insn_idx); } goto process_bpf_exit; } if (need_resched()) cond_resched(); if (env->log.level > 1 || (env->log.level && do_print_state)) { if (env->log.level > 1) verbose(env, "%d:", insn_idx); else verbose(env, "\nfrom %d to %d:", prev_insn_idx, insn_idx); print_verifier_state(env, state); do_print_state = false; } if (env->log.level) { verbose(env, "%d: ", insn_idx); print_bpf_insn(verbose, env, insn, env->allow_ptr_leaks); } err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); if (err) return err; regs = cur_regs(env); if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) return err; } else if (class == BPF_LDX) { enum bpf_reg_type *prev_src_type, src_reg_type; /* check for reserved fields is already done */ /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; src_reg_type = regs[insn->src_reg].type; /* check that memory (src_reg + off) is readable, * the state of dst_reg will be updated by this func */ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, insn->dst_reg); if (err) return err; prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_src_type == NOT_INIT) { /* saw a valid insn * dst_reg = *(u32 *)(src_reg + off) * save type to validate intersecting paths */ *prev_src_type = src_reg_type; } else if (src_reg_type != *prev_src_type && (src_reg_type == PTR_TO_CTX || *prev_src_type == PTR_TO_CTX)) { /* ABuser program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with different pointer types: * src_reg == ctx in one branch and * src_reg == stack|map in some other branch. * Reject it. 
*/ verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_STX) { enum bpf_reg_type *prev_dst_type, dst_reg_type; if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, insn_idx, insn); if (err) return err; insn_idx++; continue; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg); if (err) return err; prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; } else if (dst_reg_type != *prev_dst_type && (dst_reg_type == PTR_TO_CTX || *prev_dst_type == PTR_TO_CTX)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); if (err) return err; } else if (class == BPF_JMP) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_CALL uses reserved fields\n"); return -EINVAL; } err = check_call(env, insn->imm, insn_idx); if (err) return err; } else if (opcode == BPF_JA) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_JA uses reserved fields\n"); return -EINVAL; } insn_idx += insn->off + 1; continue; } else if (opcode == BPF_EXIT) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_EXIT uses reserved fields\n"); return -EINVAL; } /* eBPF calling convetion is such that R0 is used * to return the value from eBPF program. 
* Make sure that it's readable at this time * of bpf_exit, which means that program wrote * something into it earlier */ err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; if (is_pointer_value(env, BPF_REG_0)) { verbose(env, "R0 leaks addr as return value\n"); return -EACCES; } err = check_return_code(env); if (err) return err; process_bpf_exit: err = pop_stack(env, &prev_insn_idx, &insn_idx); if (err < 0) { if (err != -ENOENT) return err; break; } else { do_print_state = true; continue; } } else { err = check_cond_jmp_op(env, insn, &insn_idx); if (err) return err; } } else if (class == BPF_LD) { u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { err = check_ld_abs(env, insn); if (err) return err; } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) return err; insn_idx++; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; } } else { verbose(env, "unknown insn class %d\n", class); return -EINVAL; } insn_idx++; } verbose(env, "processed %d insns, stack depth %d\n", insn_processed, env->prog->aux->stack_depth); return 0; } Commit Message: bpf: fix branch pruning logic when the verifier detects that register contains a runtime constant and it's compared with another constant it will prune exploration of the branch that is guaranteed not to be taken at runtime. This is all correct, but malicious program may be constructed in such a way that it always has a constant comparison and the other branch is never taken under any conditions. In this case such path through the program will not be explored by the verifier. It won't be taken at run-time either, but since all instructions are JITed the malicious program may cause JITs to complain about using reserved fields, etc. To fix the issue we have to track the instructions explored by the verifier and sanitize instructions that are dead at run time with NOPs. We cannot reject such dead code, since llvm generates it for valid C code, since it doesn't do as much data flow analysis as the verifier does. Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)") Signed-off-by: Alexei Starovoitov <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> CWE ID: CWE-20
static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; struct bpf_insn *insns = env->prog->insnsi; struct bpf_reg_state *regs; int insn_cnt = env->prog->len; int insn_idx, prev_insn_idx = 0; int insn_processed = 0; bool do_print_state = false; state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); if (!state) return -ENOMEM; env->cur_state = state; init_reg_state(env, state->regs); state->parent = NULL; insn_idx = 0; for (;;) { struct bpf_insn *insn; u8 class; int err; if (insn_idx >= insn_cnt) { verbose(env, "invalid insn idx %d insn_cnt %d\n", insn_idx, insn_cnt); return -EFAULT; } insn = &insns[insn_idx]; class = BPF_CLASS(insn->code); if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { verbose(env, "BPF program is too large. Processed %d insn\n", insn_processed); return -E2BIG; } err = is_state_visited(env, insn_idx); if (err < 0) return err; if (err == 1) { /* found equivalent state, can prune the search */ if (env->log.level) { if (do_print_state) verbose(env, "\nfrom %d to %d: safe\n", prev_insn_idx, insn_idx); else verbose(env, "%d: safe\n", insn_idx); } goto process_bpf_exit; } if (need_resched()) cond_resched(); if (env->log.level > 1 || (env->log.level && do_print_state)) { if (env->log.level > 1) verbose(env, "%d:", insn_idx); else verbose(env, "\nfrom %d to %d:", prev_insn_idx, insn_idx); print_verifier_state(env, state); do_print_state = false; } if (env->log.level) { verbose(env, "%d: ", insn_idx); print_bpf_insn(verbose, env, insn, env->allow_ptr_leaks); } err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); if (err) return err; regs = cur_regs(env); env->insn_aux_data[insn_idx].seen = true; if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) return err; } else if (class == BPF_LDX) { enum bpf_reg_type *prev_src_type, src_reg_type; /* check for reserved fields is already done */ /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; src_reg_type = regs[insn->src_reg].type; /* check that memory (src_reg + off) is readable, * the state of dst_reg will be updated by this func */ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, insn->dst_reg); if (err) return err; prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_src_type == NOT_INIT) { /* saw a valid insn * dst_reg = *(u32 *)(src_reg + off) * save type to validate intersecting paths */ *prev_src_type = src_reg_type; } else if (src_reg_type != *prev_src_type && (src_reg_type == PTR_TO_CTX || *prev_src_type == PTR_TO_CTX)) { /* ABuser program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with different pointer types: * src_reg == ctx in one branch and * src_reg == stack|map in some other branch. * Reject it. 
*/ verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_STX) { enum bpf_reg_type *prev_dst_type, dst_reg_type; if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, insn_idx, insn); if (err) return err; insn_idx++; continue; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg); if (err) return err; prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; } else if (dst_reg_type != *prev_dst_type && (dst_reg_type == PTR_TO_CTX || *prev_dst_type == PTR_TO_CTX)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); if (err) return err; } else if (class == BPF_JMP) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_CALL uses reserved fields\n"); return -EINVAL; } err = check_call(env, insn->imm, insn_idx); if (err) return err; } else if (opcode == BPF_JA) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_JA uses reserved fields\n"); return -EINVAL; } insn_idx += insn->off + 1; continue; } else if (opcode == BPF_EXIT) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_EXIT uses reserved fields\n"); return -EINVAL; } /* eBPF calling convetion is such that R0 is used * to return the value from eBPF program. * Make sure that it's readable at this time * of bpf_exit, which means that program wrote * something into it earlier */ err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; if (is_pointer_value(env, BPF_REG_0)) { verbose(env, "R0 leaks addr as return value\n"); return -EACCES; } err = check_return_code(env); if (err) return err; process_bpf_exit: err = pop_stack(env, &prev_insn_idx, &insn_idx); if (err < 0) { if (err != -ENOENT) return err; break; } else { do_print_state = true; continue; } } else { err = check_cond_jmp_op(env, insn, &insn_idx); if (err) return err; } } else if (class == BPF_LD) { u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { err = check_ld_abs(env, insn); if (err) return err; } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) return err; insn_idx++; env->insn_aux_data[insn_idx].seen = true; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; } } else { verbose(env, "unknown insn class %d\n", class); return -EINVAL; } insn_idx++; } verbose(env, "processed %d insns, stack depth %d\n", insn_processed, env->prog->aux->stack_depth); return 0; }
167,639
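Aside (not part of the dataset row): the fix above marks every instruction the verifier actually visits (env->insn_aux_data[insn_idx].seen = true) so a later pass can turn never-visited instructions into NOPs instead of letting unverified dead code reach the JIT. The stand-alone C sketch below shows that mark-then-sanitize idea on an invented instruction encoding, not the real eBPF one.

/* Sketch: walk a tiny straight-line "program", mark visited instructions,
 * then sanitize anything the walk never reached. Encoding is invented. */
#include <stdbool.h>
#include <stdio.h>

enum op { OP_NOP, OP_ADD, OP_JUMP, OP_EXIT };

struct insn { enum op op; int target; };

#define PROG_LEN 5

int main(void)
{
	struct insn prog[PROG_LEN] = {
		{ OP_ADD,  0 },
		{ OP_JUMP, 3 },	/* always taken: insn 2 is dead code */
		{ OP_ADD,  0 },
		{ OP_ADD,  0 },
		{ OP_EXIT, 0 },
	};
	bool seen[PROG_LEN] = { false };
	int pc = 0;

	/* "Verification" pass: follow the path and mark what we see. */
	while (pc < PROG_LEN) {
		seen[pc] = true;
		if (prog[pc].op == OP_EXIT)
			break;
		pc = (prog[pc].op == OP_JUMP) ? prog[pc].target : pc + 1;
	}

	/* Sanitization pass: dead instructions become NOPs, so nothing the
	 * verifier never checked can reach a later consumer such as a JIT. */
	for (pc = 0; pc < PROG_LEN; pc++)
		if (!seen[pc])
			prog[pc].op = OP_NOP;

	for (pc = 0; pc < PROG_LEN; pc++)
		printf("insn %d: op=%d seen=%d\n", pc, prog[pc].op, seen[pc]);
	return 0;
}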
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: xsltCompileLocationPathPattern(xsltParserContextPtr ctxt, int novar) { SKIP_BLANKS; if ((CUR == '/') && (NXT(1) == '/')) { /* * since we reverse the query * a leading // can be safely ignored */ NEXT; NEXT; ctxt->comp->priority = 0.5; /* '//' means not 0 priority */ xsltCompileRelativePathPattern(ctxt, NULL, novar); } else if (CUR == '/') { /* * We need to find root as the parent */ NEXT; SKIP_BLANKS; PUSH(XSLT_OP_ROOT, NULL, NULL, novar); if ((CUR != 0) && (CUR != '|')) { PUSH(XSLT_OP_PARENT, NULL, NULL, novar); xsltCompileRelativePathPattern(ctxt, NULL, novar); } } else if (CUR == '*') { xsltCompileRelativePathPattern(ctxt, NULL, novar); } else if (CUR == '@') { xsltCompileRelativePathPattern(ctxt, NULL, novar); } else { xmlChar *name; name = xsltScanNCName(ctxt); if (name == NULL) { xsltTransformError(NULL, NULL, NULL, "xsltCompileLocationPathPattern : Name expected\n"); ctxt->error = 1; return; } SKIP_BLANKS; if ((CUR == '(') && !xmlXPathIsNodeType(name)) { xsltCompileIdKeyPattern(ctxt, name, 1, novar, 0); if ((CUR == '/') && (NXT(1) == '/')) { PUSH(XSLT_OP_ANCESTOR, NULL, NULL, novar); NEXT; NEXT; SKIP_BLANKS; xsltCompileRelativePathPattern(ctxt, NULL, novar); } else if (CUR == '/') { PUSH(XSLT_OP_PARENT, NULL, NULL, novar); NEXT; SKIP_BLANKS; xsltCompileRelativePathPattern(ctxt, NULL, novar); } return; } xsltCompileRelativePathPattern(ctxt, name, novar); } error: return; } Commit Message: Handle a bad XSLT expression better. BUG=138672 Review URL: https://chromiumcodereview.appspot.com/10830177 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@150123 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
xsltCompileLocationPathPattern(xsltParserContextPtr ctxt, int novar) { SKIP_BLANKS; if ((CUR == '/') && (NXT(1) == '/')) { /* * since we reverse the query * a leading // can be safely ignored */ NEXT; NEXT; ctxt->comp->priority = 0.5; /* '//' means not 0 priority */ xsltCompileRelativePathPattern(ctxt, NULL, novar); } else if (CUR == '/') { /* * We need to find root as the parent */ NEXT; SKIP_BLANKS; PUSH(XSLT_OP_ROOT, NULL, NULL, novar); if ((CUR != 0) && (CUR != '|')) { PUSH(XSLT_OP_PARENT, NULL, NULL, novar); xsltCompileRelativePathPattern(ctxt, NULL, novar); } } else if (CUR == '*') { xsltCompileRelativePathPattern(ctxt, NULL, novar); } else if (CUR == '@') { xsltCompileRelativePathPattern(ctxt, NULL, novar); } else { xmlChar *name; name = xsltScanNCName(ctxt); if (name == NULL) { xsltTransformError(NULL, NULL, NULL, "xsltCompileLocationPathPattern : Name expected\n"); ctxt->error = 1; return; } SKIP_BLANKS; if ((CUR == '(') && !xmlXPathIsNodeType(name)) { xsltCompileIdKeyPattern(ctxt, name, 1, novar, 0); if (ctxt->error) return; if ((CUR == '/') && (NXT(1) == '/')) { PUSH(XSLT_OP_ANCESTOR, NULL, NULL, novar); NEXT; NEXT; SKIP_BLANKS; xsltCompileRelativePathPattern(ctxt, NULL, novar); } else if (CUR == '/') { PUSH(XSLT_OP_PARENT, NULL, NULL, novar); NEXT; SKIP_BLANKS; xsltCompileRelativePathPattern(ctxt, NULL, novar); } return; } xsltCompileRelativePathPattern(ctxt, name, novar); } error: return; }
170,902
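Aside (not part of the dataset row): the fix above adds a single check, if (ctxt->error) return;, right after xsltCompileIdKeyPattern() so compilation stops instead of continuing with a parser context that is already in an error state. The C sketch below shows that error-propagation pattern generically; the context layout and helper names are invented, not the libxslt API.

/* Sketch of "check the context's sticky error flag after every sub-parse". */
#include <stdio.h>

struct parse_ctxt {
	const char *cur;	/* read position */
	int error;		/* sticky error flag, like ctxt->error */
};

/* A sub-parser that sets ctxt->error instead of returning a status. */
static void parse_name(struct parse_ctxt *ctxt)
{
	if (*ctxt->cur < 'a' || *ctxt->cur > 'z') {
		ctxt->error = 1;
		return;
	}
	while (*ctxt->cur >= 'a' && *ctxt->cur <= 'z')
		ctxt->cur++;
}

static void parse_pattern(struct parse_ctxt *ctxt)
{
	parse_name(ctxt);
	if (ctxt->error)	/* the fix: bail out before using the result */
		return;
	if (*ctxt->cur == '(') {
		ctxt->cur++;
		parse_name(ctxt);
		if (ctxt->error)
			return;
	}
}

int main(void)
{
	struct parse_ctxt ok  = { "key(abc", 0 };
	struct parse_ctxt bad = { "123",     0 };

	parse_pattern(&ok);
	parse_pattern(&bad);
	printf("ok.error=%d bad.error=%d\n", ok.error, bad.error);
	return 0;
}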
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void btif_av_event_deep_copy(uint16_t event, char* p_dest, char* p_src) { BTIF_TRACE_DEBUG("%s", __func__); tBTA_AV* av_src = (tBTA_AV*)p_src; tBTA_AV* av_dest = (tBTA_AV*)p_dest; maybe_non_aligned_memcpy(av_dest, av_src, sizeof(*av_src)); switch (event) { case BTA_AV_META_MSG_EVT: if (av_src->meta_msg.p_data && av_src->meta_msg.len) { av_dest->meta_msg.p_data = (uint8_t*)osi_calloc(av_src->meta_msg.len); memcpy(av_dest->meta_msg.p_data, av_src->meta_msg.p_data, av_src->meta_msg.len); } if (av_src->meta_msg.p_msg) { av_dest->meta_msg.p_msg = (tAVRC_MSG*)osi_calloc(sizeof(tAVRC_MSG)); memcpy(av_dest->meta_msg.p_msg, av_src->meta_msg.p_msg, sizeof(tAVRC_MSG)); tAVRC_MSG* p_msg_src = av_src->meta_msg.p_msg; tAVRC_MSG* p_msg_dest = av_dest->meta_msg.p_msg; if ((p_msg_src->hdr.opcode == AVRC_OP_VENDOR) && (p_msg_src->vendor.p_vendor_data && p_msg_src->vendor.vendor_len)) { p_msg_dest->vendor.p_vendor_data = (uint8_t*)osi_calloc(p_msg_src->vendor.vendor_len); memcpy(p_msg_dest->vendor.p_vendor_data, p_msg_src->vendor.p_vendor_data, p_msg_src->vendor.vendor_len); } } break; default: break; } } Commit Message: DO NOT MERGE AVRC: Copy browse.p_browse_data in btif_av_event_deep_copy p_msg_src->browse.p_browse_data is not copied, but used after the original pointer is freed Bug: 109699112 Test: manual Change-Id: I1d014eb9a8911da6913173a9b11218bf1c89e16e (cherry picked from commit 1d9a58768e6573899c7e80c2b3f52e22f2d8f58b) CWE ID: CWE-416
void btif_av_event_deep_copy(uint16_t event, char* p_dest, char* p_src) { BTIF_TRACE_DEBUG("%s", __func__); tBTA_AV* av_src = (tBTA_AV*)p_src; tBTA_AV* av_dest = (tBTA_AV*)p_dest; maybe_non_aligned_memcpy(av_dest, av_src, sizeof(*av_src)); switch (event) { case BTA_AV_META_MSG_EVT: if (av_src->meta_msg.p_data && av_src->meta_msg.len) { av_dest->meta_msg.p_data = (uint8_t*)osi_calloc(av_src->meta_msg.len); memcpy(av_dest->meta_msg.p_data, av_src->meta_msg.p_data, av_src->meta_msg.len); } if (av_src->meta_msg.p_msg) { av_dest->meta_msg.p_msg = (tAVRC_MSG*)osi_calloc(sizeof(tAVRC_MSG)); memcpy(av_dest->meta_msg.p_msg, av_src->meta_msg.p_msg, sizeof(tAVRC_MSG)); tAVRC_MSG* p_msg_src = av_src->meta_msg.p_msg; tAVRC_MSG* p_msg_dest = av_dest->meta_msg.p_msg; if ((p_msg_src->hdr.opcode == AVRC_OP_VENDOR) && (p_msg_src->vendor.p_vendor_data && p_msg_src->vendor.vendor_len)) { p_msg_dest->vendor.p_vendor_data = (uint8_t*)osi_calloc(p_msg_src->vendor.vendor_len); memcpy(p_msg_dest->vendor.p_vendor_data, p_msg_src->vendor.p_vendor_data, p_msg_src->vendor.vendor_len); } if ((p_msg_src->hdr.opcode == AVRC_OP_BROWSE) && p_msg_src->browse.p_browse_data && p_msg_src->browse.browse_len) { p_msg_dest->browse.p_browse_data = (uint8_t*)osi_calloc(p_msg_src->browse.browse_len); memcpy(p_msg_dest->browse.p_browse_data, p_msg_src->browse.p_browse_data, p_msg_src->browse.browse_len); android_errorWriteLog(0x534e4554, "109699112"); } } break; default: break; } }
174,100
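Aside (not part of the dataset row): the fix above deep-copies browse.p_browse_data the same way the vendor data is already copied, because leaving that pointer shared means the copy dangles once the original message is freed (CWE-416). The C sketch below shows the shallow-copy versus deep-copy distinction; the struct and field names are invented.

/* Sketch: a message holding a heap buffer must be deep-copied, or the
 * copy keeps pointing into memory the original owner will free. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct msg {
	size_t len;
	unsigned char *data;	/* heap-allocated payload */
};

/* Deep copy: duplicate the payload, not just the pointer. */
static int msg_copy(struct msg *dst, const struct msg *src)
{
	*dst = *src;		/* copies len, but also the raw pointer */
	dst->data = NULL;
	if (src->data && src->len) {
		dst->data = malloc(src->len);
		if (!dst->data)
			return -1;
		memcpy(dst->data, src->data, src->len);
	}
	return 0;
}

static void msg_free(struct msg *m)
{
	free(m->data);
	m->data = NULL;
	m->len = 0;
}

int main(void)
{
	struct msg src = { 5, NULL }, dst;

	src.data = malloc(src.len);
	if (!src.data)
		return 1;
	memcpy(src.data, "abcd", src.len);	/* 5 bytes incl. the NUL */

	if (msg_copy(&dst, &src) != 0)
		return 1;
	msg_free(&src);		/* the original goes away... */

	/* ...but dst still owns its own buffer, so this access is safe. */
	printf("%s\n", (char *)dst.data);
	msg_free(&dst);
	return 0;
}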
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: context_length_arg (char const *str, int *out) { uintmax_t value; if (! (xstrtoumax (str, 0, 10, &value, "") == LONGINT_OK && 0 <= (*out = value) && *out == value)) { error (EXIT_TROUBLE, 0, "%s: %s", str, _("invalid context length argument")); } page size, unless a read yields a partial page. */ static char *buffer; /* Base of buffer. */ static size_t bufalloc; /* Allocated buffer size, counting slop. */ #define INITIAL_BUFSIZE 32768 /* Initial buffer size, not counting slop. */ static int bufdesc; /* File descriptor. */ static char *bufbeg; /* Beginning of user-visible stuff. */ static char *buflim; /* Limit of user-visible stuff. */ static size_t pagesize; /* alignment of memory pages */ static off_t bufoffset; /* Read offset; defined on regular files. */ static off_t after_last_match; /* Pointer after last matching line that would have been output if we were outputting characters. */ /* Return VAL aligned to the next multiple of ALIGNMENT. VAL can be an integer or a pointer. Both args must be free of side effects. */ #define ALIGN_TO(val, alignment) \ ((size_t) (val) % (alignment) == 0 \ ? (val) \ : (val) + ((alignment) - (size_t) (val) % (alignment))) /* Reset the buffer for a new file, returning zero if we should skip it. Initialize on the first time through. */ static int reset (int fd, char const *file, struct stats *stats) { if (! pagesize) { pagesize = getpagesize (); if (pagesize == 0 || 2 * pagesize + 1 <= pagesize) abort (); bufalloc = ALIGN_TO (INITIAL_BUFSIZE, pagesize) + pagesize + 1; buffer = xmalloc (bufalloc); } bufbeg = buflim = ALIGN_TO (buffer + 1, pagesize); bufbeg[-1] = eolbyte; bufdesc = fd; if (S_ISREG (stats->stat.st_mode)) { if (file) bufoffset = 0; else { bufoffset = lseek (fd, 0, SEEK_CUR); if (bufoffset < 0) { suppressible_error (_("lseek failed"), errno); return 0; } } } return 1; } /* Read new stuff into the buffer, saving the specified amount of old stuff. When we're done, 'bufbeg' points to the beginning of the buffer contents, and 'buflim' points just after the end. Return zero if there's an error. */ static int fillbuf (size_t save, struct stats const *stats) { size_t fillsize = 0; int cc = 1; char *readbuf; size_t readsize; /* Offset from start of buffer to start of old stuff that we want to save. */ size_t saved_offset = buflim - save - buffer; if (pagesize <= buffer + bufalloc - buflim) { readbuf = buflim; bufbeg = buflim - save; } else { size_t minsize = save + pagesize; size_t newsize; size_t newalloc; char *newbuf; /* Grow newsize until it is at least as great as minsize. */ for (newsize = bufalloc - pagesize - 1; newsize < minsize; newsize *= 2) if (newsize * 2 < newsize || newsize * 2 + pagesize + 1 < newsize * 2) xalloc_die (); /* Try not to allocate more memory than the file size indicates, as that might cause unnecessary memory exhaustion if the file is large. However, do not use the original file size as a heuristic if we've already read past the file end, as most likely the file is growing. */ if (S_ISREG (stats->stat.st_mode)) { off_t to_be_read = stats->stat.st_size - bufoffset; off_t maxsize_off = save + to_be_read; if (0 <= to_be_read && to_be_read <= maxsize_off && maxsize_off == (size_t) maxsize_off && minsize <= (size_t) maxsize_off && (size_t) maxsize_off < newsize) newsize = maxsize_off; } /* Add enough room so that the buffer is aligned and has room for byte sentinels fore and aft. */ newalloc = newsize + pagesize + 1; newbuf = bufalloc < newalloc ? 
xmalloc (bufalloc = newalloc) : buffer; readbuf = ALIGN_TO (newbuf + 1 + save, pagesize); bufbeg = readbuf - save; memmove (bufbeg, buffer + saved_offset, save); bufbeg[-1] = eolbyte; if (newbuf != buffer) { free (buffer); buffer = newbuf; } } readsize = buffer + bufalloc - readbuf; readsize -= readsize % pagesize; if (! fillsize) { ssize_t bytesread; while ((bytesread = read (bufdesc, readbuf, readsize)) < 0 && errno == EINTR) continue; if (bytesread < 0) cc = 0; else fillsize = bytesread; } bufoffset += fillsize; #if defined HAVE_DOS_FILE_CONTENTS if (fillsize) fillsize = undossify_input (readbuf, fillsize); #endif buflim = readbuf + fillsize; return cc; } /* Flags controlling the style of output. */ static enum { BINARY_BINARY_FILES, TEXT_BINARY_FILES, WITHOUT_MATCH_BINARY_FILES } binary_files; /* How to handle binary files. */ static int filename_mask; /* If zero, output nulls after filenames. */ static int out_quiet; /* Suppress all normal output. */ static int out_invert; /* Print nonmatching stuff. */ static int out_file; /* Print filenames. */ static int out_line; /* Print line numbers. */ static int out_byte; /* Print byte offsets. */ static int out_before; /* Lines of leading context. */ static int out_after; /* Lines of trailing context. */ static int out_file; /* Print filenames. */ static int out_line; /* Print line numbers. */ static int out_byte; /* Print byte offsets. */ static int out_before; /* Lines of leading context. */ static int out_after; /* Lines of trailing context. */ static int count_matches; /* Count matching lines. */ static int list_files; /* List matching files. */ static int no_filenames; /* Suppress file names. */ static off_t max_count; /* Stop after outputting this many lines from an input file. */ static int line_buffered; /* If nonzero, use line buffering, i.e. fflush everyline out. */ static char const *lastnl; /* Pointer after last newline counted. */ static char const *lastout; /* Pointer after last character output; NULL if no character has been output or if it's conceptually before bufbeg. */ static uintmax_t totalnl; /* Total newline count before lastnl. */ static off_t outleft; /* Maximum number of lines to be output. */ static int pending; /* Pending lines of output. NULL if no character has been output or if it's conceptually before bufbeg. */ static uintmax_t totalnl; /* Total newline count before lastnl. */ static off_t outleft; /* Maximum number of lines to be output. */ static int pending; /* Pending lines of output. Always kept 0 if out_quiet is true. */ static int done_on_match; /* Stop scanning file on first match. */ static int exit_on_match; /* Exit on first match. */ /* Add two numbers that count input bytes or lines, and report an error if the addition overflows. */ static uintmax_t add_count (uintmax_t a, uintmax_t b) { uintmax_t sum = a + b; if (sum < a) error (EXIT_TROUBLE, 0, _("input is too large to count")); return sum; } static void nlscan (char const *lim) { size_t newlines = 0; char const *beg; for (beg = lastnl; beg < lim; beg++) { beg = memchr (beg, eolbyte, lim - beg); if (!beg) break; newlines++; } totalnl = add_count (totalnl, newlines); lastnl = lim; } /* Print the current filename. */ static void print_filename (void) { pr_sgr_start_if (filename_color); fputs (filename, stdout); pr_sgr_end_if (filename_color); } /* Print a character separator. */ static void print_sep (char sep) { pr_sgr_start_if (sep_color); fputc (sep, stdout); pr_sgr_end_if (sep_color); } /* Print a line number or a byte offset. 
*/ static void print_offset (uintmax_t pos, int min_width, const char *color) { /* Do not rely on printf to print pos, since uintmax_t may be longer than long, and long long is not portable. */ char buf[sizeof pos * CHAR_BIT]; char *p = buf + sizeof buf; do { *--p = '0' + pos % 10; --min_width; } while ((pos /= 10) != 0); /* Do this to maximize the probability of alignment across lines. */ if (align_tabs) while (--min_width >= 0) *--p = ' '; pr_sgr_start_if (color); fwrite (p, 1, buf + sizeof buf - p, stdout); pr_sgr_end_if (color); } /* Print a whole line head (filename, line, byte). */ static void print_line_head (char const *beg, char const *lim, int sep) { int pending_sep = 0; if (out_file) { print_filename (); if (filename_mask) pending_sep = 1; else fputc (0, stdout); } if (out_line) { if (lastnl < lim) { nlscan (beg); totalnl = add_count (totalnl, 1); lastnl = lim; } if (pending_sep) print_sep (sep); print_offset (totalnl, 4, line_num_color); pending_sep = 1; } if (out_byte) { uintmax_t pos = add_count (totalcc, beg - bufbeg); #if defined HAVE_DOS_FILE_CONTENTS pos = dossified_pos (pos); #endif if (pending_sep) print_sep (sep); print_offset (pos, 6, byte_num_color); pending_sep = 1; } if (pending_sep) { /* This assumes sep is one column wide. Try doing this any other way with Unicode (and its combining and wide characters) filenames and you're wasting your efforts. */ if (align_tabs) fputs ("\t\b", stdout); print_sep (sep); } } static const char * print_line_middle (const char *beg, const char *lim, const char *line_color, const char *match_color) { size_t match_size; size_t match_offset; const char *cur = beg; const char *mid = NULL; while (cur < lim && ((match_offset = execute (beg, lim - beg, &match_size, beg + (cur - beg))) != (size_t) -1)) { char const *b = beg + match_offset; /* Avoid matching the empty line at the end of the buffer. */ if (b == lim) break; /* Avoid hanging on grep --color "" foo */ if (match_size == 0) { /* Make minimal progress; there may be further non-empty matches. */ /* XXX - Could really advance by one whole multi-octet character. */ match_size = 1; if (!mid) mid = cur; } else { /* This function is called on a matching line only, but is it selected or rejected/context? */ if (only_matching) print_line_head (b, lim, (out_invert ? SEP_CHAR_REJECTED : SEP_CHAR_SELECTED)); else { pr_sgr_start (line_color); if (mid) { cur = mid; mid = NULL; } fwrite (cur, sizeof (char), b - cur, stdout); } pr_sgr_start_if (match_color); fwrite (b, sizeof (char), match_size, stdout); pr_sgr_end_if (match_color); if (only_matching) fputs ("\n", stdout); } cur = b + match_size; } if (only_matching) cur = lim; else if (mid) cur = mid; return cur; } static const char * print_line_tail (const char *beg, const char *lim, const char *line_color) { size_t eol_size; size_t tail_size; eol_size = (lim > beg && lim[-1] == eolbyte); eol_size += (lim - eol_size > beg && lim[-(1 + eol_size)] == '\r'); tail_size = lim - eol_size - beg; if (tail_size > 0) { pr_sgr_start (line_color); fwrite (beg, 1, tail_size, stdout); beg += tail_size; pr_sgr_end (line_color); } return beg; } static void prline (char const *beg, char const *lim, int sep) { int matching; const char *line_color; const char *match_color; if (!only_matching) print_line_head (beg, lim, sep); matching = (sep == SEP_CHAR_SELECTED) ^ !!out_invert; if (color_option) { line_color = (((sep == SEP_CHAR_SELECTED) ^ (out_invert && (color_option < 0))) ? selected_line_color : context_line_color); match_color = (sep == SEP_CHAR_SELECTED ? 
selected_match_color : context_match_color); } else line_color = match_color = NULL; /* Shouldn't be used. */ if ((only_matching && matching) || (color_option && (*line_color || *match_color))) { /* We already know that non-matching lines have no match (to colorize). */ if (matching && (only_matching || *match_color)) beg = print_line_middle (beg, lim, line_color, match_color); /* FIXME: this test may be removable. */ if (!only_matching && *line_color) beg = print_line_tail (beg, lim, line_color); } if (!only_matching && lim > beg) fwrite (beg, 1, lim - beg, stdout); if (ferror (stdout)) { write_error_seen = 1; error (EXIT_TROUBLE, 0, _("write error")); } lastout = lim; if (line_buffered) fflush (stdout); } /* Print pending lines of trailing context prior to LIM. Trailing context ends at the next matching line when OUTLEFT is 0. */ static void prpending (char const *lim) { if (!lastout) lastout = bufbeg; while (pending > 0 && lastout < lim) { char const *nl = memchr (lastout, eolbyte, lim - lastout); size_t match_size; --pending; if (outleft || ((execute (lastout, nl + 1 - lastout, &match_size, NULL) == (size_t) -1) == !out_invert)) prline (lastout, nl + 1, SEP_CHAR_REJECTED); else pending = 0; } } /* Print the lines between BEG and LIM. Deal with context crap. If NLINESP is non-null, store a count of lines between BEG and LIM. */ static void prtext (char const *beg, char const *lim, int *nlinesp) { /* Print the lines between BEG and LIM. Deal with context crap. If NLINESP is non-null, store a count of lines between BEG and LIM. */ static void prtext (char const *beg, char const *lim, int *nlinesp) { static int used; /* avoid printing SEP_STR_GROUP before any output */ char const *bp, *p; char eol = eolbyte; int i, n; if (!out_quiet && pending > 0) prpending (beg); /* Deal with leading context crap. */ bp = lastout ? lastout : bufbeg; for (i = 0; i < out_before; ++i) if (p > bp) do --p; while (p[-1] != eol); /* We print the SEP_STR_GROUP separator only if our output is discontiguous from the last output in the file. */ if ((out_before || out_after) && used && p != lastout && group_separator) { pr_sgr_start_if (sep_color); fputs (group_separator, stdout); pr_sgr_end_if (sep_color); fputc ('\n', stdout); } while (p < beg) { char const *nl = memchr (p, eol, beg - p); nl++; prline (p, nl, SEP_CHAR_REJECTED); p = nl; } } if (nlinesp) { /* Caller wants a line count. */ for (n = 0; p < lim && n < outleft; n++) { char const *nl = memchr (p, eol, lim - p); nl++; if (!out_quiet) prline (p, nl, SEP_CHAR_SELECTED); p = nl; } *nlinesp = n; /* relying on it that this function is never called when outleft = 0. */ after_last_match = bufoffset - (buflim - p); } else if (!out_quiet) prline (beg, lim, SEP_CHAR_SELECTED); pending = out_quiet ? 0 : out_after; used = 1; } static size_t do_execute (char const *buf, size_t size, size_t *match_size, char const *start_ptr) { size_t result; const char *line_next; /* With the current implementation, using --ignore-case with a multi-byte character set is very inefficient when applied to a large buffer containing many matches. We can avoid much of the wasted effort by matching line-by-line. FIXME: this is just an ugly workaround, and it doesn't really belong here. Also, PCRE is always using this same per-line matching algorithm. Either we fix -i, or we should refactor this code---for example, we could add another function pointer to struct matcher to split the buffer passed to execute. 
It would perform the memchr if line-by-line matching is necessary, or just return buf + size otherwise. */ if (MB_CUR_MAX == 1 || !match_icase) return execute (buf, size, match_size, start_ptr); for (line_next = buf; line_next < buf + size; ) { const char *line_buf = line_next; const char *line_end = memchr (line_buf, eolbyte, (buf + size) - line_buf); if (line_end == NULL) line_next = line_end = buf + size; else line_next = line_end + 1; if (start_ptr && start_ptr >= line_end) continue; result = execute (line_buf, line_next - line_buf, match_size, start_ptr); if (result != (size_t) -1) return (line_buf - buf) + result; } return (size_t) -1; } /* Scan the specified portion of the buffer, matching lines (or between matching lines if OUT_INVERT is true). Return a count of lines printed. */ static int grepbuf (char const *beg, char const *lim) /* Scan the specified portion of the buffer, matching lines (or between matching lines if OUT_INVERT is true). Return a count of lines printed. */ static int grepbuf (char const *beg, char const *lim) { int nlines, n; char const *p; size_t match_offset; size_t match_size; { char const *b = p + match_offset; char const *endp = b + match_size; /* Avoid matching the empty line at the end of the buffer. */ if (b == lim) break; if (!out_invert) { prtext (b, endp, (int *) 0); nlines++; break; if (!out_invert) { prtext (b, endp, (int *) 0); nlines++; outleft--; if (!outleft || done_on_match) } } else if (p < b) { prtext (p, b, &n); nlines += n; outleft -= n; if (!outleft) return nlines; } p = endp; } if (out_invert && p < lim) { prtext (p, lim, &n); nlines += n; outleft -= n; } return nlines; } /* Search a given file. Normally, return a count of lines printed; but if the file is a directory and we search it recursively, then return -2 if there was a match, and -1 otherwise. */ static int grep (int fd, char const *file, struct stats *stats) /* Search a given file. Normally, return a count of lines printed; but if the file is a directory and we search it recursively, then return -2 if there was a match, and -1 otherwise. */ static int grep (int fd, char const *file, struct stats *stats) { int nlines, i; int not_text; size_t residue, save; char oldc; return 0; if (file && directories == RECURSE_DIRECTORIES && S_ISDIR (stats->stat.st_mode)) { /* Close fd now, so that we don't open a lot of file descriptors when we recurse deeply. */ if (close (fd) != 0) suppressible_error (file, errno); return grepdir (file, stats) - 2; } totalcc = 0; lastout = 0; totalnl = 0; outleft = max_count; after_last_match = 0; pending = 0; nlines = 0; residue = 0; save = 0; if (! fillbuf (save, stats)) { suppressible_error (filename, errno); return 0; } not_text = (((binary_files == BINARY_BINARY_FILES && !out_quiet) || binary_files == WITHOUT_MATCH_BINARY_FILES) && memchr (bufbeg, eol ? '\0' : '\200', buflim - bufbeg)); if (not_text && binary_files == WITHOUT_MATCH_BINARY_FILES) return 0; done_on_match += not_text; out_quiet += not_text; for (;;) { lastnl = bufbeg; if (lastout) lastout = bufbeg; beg = bufbeg + save; /* no more data to scan (eof) except for maybe a residue -> break */ if (beg == buflim) break; /* Determine new residue (the length of an incomplete line at the end of the buffer, 0 means there is no incomplete last line). 
*/ oldc = beg[-1]; beg[-1] = eol; for (lim = buflim; lim[-1] != eol; lim--) continue; beg[-1] = oldc; if (lim == beg) lim = beg - residue; beg -= residue; residue = buflim - lim; if (beg < lim) { if (outleft) nlines += grepbuf (beg, lim); if (pending) prpending (lim); if ((!outleft && !pending) || (nlines && done_on_match && !out_invert)) goto finish_grep; } /* The last OUT_BEFORE lines at the end of the buffer will be needed as leading context if there is a matching line at the begin of the next data. Make beg point to their begin. */ i = 0; beg = lim; while (i < out_before && beg > bufbeg && beg != lastout) { ++i; do --beg; while (beg[-1] != eol); } /* detect if leading context is discontinuous from last printed line. */ if (beg != lastout) lastout = 0; /* Handle some details and read more data to scan. */ save = residue + lim - beg; if (out_byte) totalcc = add_count (totalcc, buflim - bufbeg - save); if (out_line) nlscan (beg); if (! fillbuf (save, stats)) { suppressible_error (filename, errno); goto finish_grep; } } if (residue) { *buflim++ = eol; if (outleft) nlines += grepbuf (bufbeg + save - residue, buflim); if (pending) prpending (buflim); } finish_grep: done_on_match -= not_text; out_quiet -= not_text; if ((not_text & ~out_quiet) && nlines != 0) printf (_("Binary file %s matches\n"), filename); return nlines; } static int grepfile (char const *file, struct stats *stats) { int desc; int count; int status; grepfile (char const *file, struct stats *stats) { int desc; int count; int status; filename = (file ? file : label ? label : _("(standard input)")); /* Don't open yet, since that might have side effects on a device. */ desc = -1; } else { /* When skipping directories, don't worry about directories that can't be opened. */ desc = open (file, O_RDONLY); if (desc < 0 && directories != SKIP_DIRECTORIES) { suppressible_error (file, errno); return 1; } } if (desc < 0 ? stat (file, &stats->stat) != 0 : fstat (desc, &stats->stat) != 0) { suppressible_error (filename, errno); if (file) close (desc); return 1; } if ((directories == SKIP_DIRECTORIES && S_ISDIR (stats->stat.st_mode)) || (devices == SKIP_DEVICES && (S_ISCHR (stats->stat.st_mode) || S_ISBLK (stats->stat.st_mode) || S_ISSOCK (stats->stat.st_mode) || S_ISFIFO (stats->stat.st_mode)))) { if (file) close (desc); return 1; } /* If there is a regular file on stdout and the current file refers to the same i-node, we have to report the problem and skip it. Otherwise when matching lines from some other input reach the disk before we open this file, we can end up reading and matching those lines and appending them to the file from which we're reading. Then we'd have what appears to be an infinite loop that'd terminate only upon filling the output file system or reaching a quota. However, there is no risk of an infinite loop if grep is generating no output, i.e., with --silent, --quiet, -q. Similarly, with any of these: --max-count=N (-m) (for N >= 2) --files-with-matches (-l) --files-without-match (-L) there is no risk of trouble. For --max-count=1, grep stops after printing the first match, so there is no risk of malfunction. But even --max-count=2, with input==output, while there is no risk of infloop, there is a race condition that could result in "alternate" output. */ if (!out_quiet && list_files == 0 && 1 < max_count && S_ISREG (out_stat.st_mode) && out_stat.st_ino && SAME_INODE (stats->stat, out_stat)) { if (! 
suppress_errors) error (0, 0, _("input file %s is also the output"), quote (filename)); errseen = 1; if (file) close (desc); return 1; } if (desc < 0) { desc = open (file, O_RDONLY); if (desc < 0) { suppressible_error (file, errno); return 1; } } #if defined SET_BINARY /* Set input to binary mode. Pipes are simulated with files on DOS, so this includes the case of "foo | grep bar". */ if (!isatty (desc)) SET_BINARY (desc); #endif count = grep (desc, file, stats); if (count < 0) status = count + 2; else { if (count_matches) { if (out_file) { print_filename (); if (filename_mask) print_sep (SEP_CHAR_SELECTED); else fputc (0, stdout); } printf ("%d\n", count); } else fputc (0, stdout); } printf ("%d\n", count); } status = !count; if (! file) { off_t required_offset = outleft ? bufoffset : after_last_match; if (required_offset != bufoffset && lseek (desc, required_offset, SEEK_SET) < 0 && S_ISREG (stats->stat.st_mode)) suppressible_error (filename, errno); } else while (close (desc) != 0) if (errno != EINTR) { suppressible_error (file, errno); break; } } Commit Message: CWE ID: CWE-189
context_length_arg (char const *str, int *out) context_length_arg (char const *str, intmax_t *out) { switch (xstrtoimax (str, 0, 10, out, "")) { case LONGINT_OK: case LONGINT_OVERFLOW: if (0 <= *out) break; /* Fall through. */ default: error (EXIT_TROUBLE, 0, "%s: %s", str, _("invalid context length argument")); } page size, unless a read yields a partial page. */ static char *buffer; /* Base of buffer. */ static size_t bufalloc; /* Allocated buffer size, counting slop. */ #define INITIAL_BUFSIZE 32768 /* Initial buffer size, not counting slop. */ static int bufdesc; /* File descriptor. */ static char *bufbeg; /* Beginning of user-visible stuff. */ static char *buflim; /* Limit of user-visible stuff. */ static size_t pagesize; /* alignment of memory pages */ static off_t bufoffset; /* Read offset; defined on regular files. */ static off_t after_last_match; /* Pointer after last matching line that would have been output if we were outputting characters. */ /* Return VAL aligned to the next multiple of ALIGNMENT. VAL can be an integer or a pointer. Both args must be free of side effects. */ #define ALIGN_TO(val, alignment) \ ((size_t) (val) % (alignment) == 0 \ ? (val) \ : (val) + ((alignment) - (size_t) (val) % (alignment))) /* Reset the buffer for a new file, returning zero if we should skip it. Initialize on the first time through. */ static int reset (int fd, char const *file, struct stats *stats) { if (! pagesize) { pagesize = getpagesize (); if (pagesize == 0 || 2 * pagesize + 1 <= pagesize) abort (); bufalloc = ALIGN_TO (INITIAL_BUFSIZE, pagesize) + pagesize + 1; buffer = xmalloc (bufalloc); } bufbeg = buflim = ALIGN_TO (buffer + 1, pagesize); bufbeg[-1] = eolbyte; bufdesc = fd; if (S_ISREG (stats->stat.st_mode)) { if (file) bufoffset = 0; else { bufoffset = lseek (fd, 0, SEEK_CUR); if (bufoffset < 0) { suppressible_error (_("lseek failed"), errno); return 0; } } } return 1; } /* Read new stuff into the buffer, saving the specified amount of old stuff. When we're done, 'bufbeg' points to the beginning of the buffer contents, and 'buflim' points just after the end. Return zero if there's an error. */ static int fillbuf (size_t save, struct stats const *stats) { size_t fillsize = 0; int cc = 1; char *readbuf; size_t readsize; /* Offset from start of buffer to start of old stuff that we want to save. */ size_t saved_offset = buflim - save - buffer; if (pagesize <= buffer + bufalloc - buflim) { readbuf = buflim; bufbeg = buflim - save; } else { size_t minsize = save + pagesize; size_t newsize; size_t newalloc; char *newbuf; /* Grow newsize until it is at least as great as minsize. */ for (newsize = bufalloc - pagesize - 1; newsize < minsize; newsize *= 2) if (newsize * 2 < newsize || newsize * 2 + pagesize + 1 < newsize * 2) xalloc_die (); /* Try not to allocate more memory than the file size indicates, as that might cause unnecessary memory exhaustion if the file is large. However, do not use the original file size as a heuristic if we've already read past the file end, as most likely the file is growing. */ if (S_ISREG (stats->stat.st_mode)) { off_t to_be_read = stats->stat.st_size - bufoffset; off_t maxsize_off = save + to_be_read; if (0 <= to_be_read && to_be_read <= maxsize_off && maxsize_off == (size_t) maxsize_off && minsize <= (size_t) maxsize_off && (size_t) maxsize_off < newsize) newsize = maxsize_off; } /* Add enough room so that the buffer is aligned and has room for byte sentinels fore and aft. */ newalloc = newsize + pagesize + 1; newbuf = bufalloc < newalloc ? 
xmalloc (bufalloc = newalloc) : buffer; readbuf = ALIGN_TO (newbuf + 1 + save, pagesize); bufbeg = readbuf - save; memmove (bufbeg, buffer + saved_offset, save); bufbeg[-1] = eolbyte; if (newbuf != buffer) { free (buffer); buffer = newbuf; } } readsize = buffer + bufalloc - readbuf; readsize -= readsize % pagesize; if (! fillsize) { ssize_t bytesread; while ((bytesread = read (bufdesc, readbuf, readsize)) < 0 && errno == EINTR) continue; if (bytesread < 0) cc = 0; else fillsize = bytesread; } bufoffset += fillsize; #if defined HAVE_DOS_FILE_CONTENTS if (fillsize) fillsize = undossify_input (readbuf, fillsize); #endif buflim = readbuf + fillsize; return cc; } /* Flags controlling the style of output. */ static enum { BINARY_BINARY_FILES, TEXT_BINARY_FILES, WITHOUT_MATCH_BINARY_FILES } binary_files; /* How to handle binary files. */ static int filename_mask; /* If zero, output nulls after filenames. */ static int out_quiet; /* Suppress all normal output. */ static int out_invert; /* Print nonmatching stuff. */ static int out_file; /* Print filenames. */ static int out_line; /* Print line numbers. */ static int out_byte; /* Print byte offsets. */ static int out_before; /* Lines of leading context. */ static int out_after; /* Lines of trailing context. */ static int out_file; /* Print filenames. */ static int out_line; /* Print line numbers. */ static int out_byte; /* Print byte offsets. */ static intmax_t out_before; /* Lines of leading context. */ static intmax_t out_after; /* Lines of trailing context. */ static int count_matches; /* Count matching lines. */ static int list_files; /* List matching files. */ static int no_filenames; /* Suppress file names. */ static intmax_t max_count; /* Stop after outputting this many lines from an input file. */ static int line_buffered; /* If nonzero, use line buffering, i.e. fflush everyline out. */ static char const *lastnl; /* Pointer after last newline counted. */ static char const *lastout; /* Pointer after last character output; NULL if no character has been output or if it's conceptually before bufbeg. */ static uintmax_t totalnl; /* Total newline count before lastnl. */ static off_t outleft; /* Maximum number of lines to be output. */ static int pending; /* Pending lines of output. NULL if no character has been output or if it's conceptually before bufbeg. */ static uintmax_t totalnl; /* Total newline count before lastnl. */ static intmax_t outleft; /* Maximum number of lines to be output. */ static intmax_t pending; /* Pending lines of output. Always kept 0 if out_quiet is true. */ static int done_on_match; /* Stop scanning file on first match. */ static int exit_on_match; /* Exit on first match. */ /* Add two numbers that count input bytes or lines, and report an error if the addition overflows. */ static uintmax_t add_count (uintmax_t a, uintmax_t b) { uintmax_t sum = a + b; if (sum < a) error (EXIT_TROUBLE, 0, _("input is too large to count")); return sum; } static void nlscan (char const *lim) { size_t newlines = 0; char const *beg; for (beg = lastnl; beg < lim; beg++) { beg = memchr (beg, eolbyte, lim - beg); if (!beg) break; newlines++; } totalnl = add_count (totalnl, newlines); lastnl = lim; } /* Print the current filename. */ static void print_filename (void) { pr_sgr_start_if (filename_color); fputs (filename, stdout); pr_sgr_end_if (filename_color); } /* Print a character separator. 
*/ static void print_sep (char sep) { pr_sgr_start_if (sep_color); fputc (sep, stdout); pr_sgr_end_if (sep_color); } /* Print a line number or a byte offset. */ static void print_offset (uintmax_t pos, int min_width, const char *color) { /* Do not rely on printf to print pos, since uintmax_t may be longer than long, and long long is not portable. */ char buf[sizeof pos * CHAR_BIT]; char *p = buf + sizeof buf; do { *--p = '0' + pos % 10; --min_width; } while ((pos /= 10) != 0); /* Do this to maximize the probability of alignment across lines. */ if (align_tabs) while (--min_width >= 0) *--p = ' '; pr_sgr_start_if (color); fwrite (p, 1, buf + sizeof buf - p, stdout); pr_sgr_end_if (color); } /* Print a whole line head (filename, line, byte). */ static void print_line_head (char const *beg, char const *lim, int sep) { int pending_sep = 0; if (out_file) { print_filename (); if (filename_mask) pending_sep = 1; else fputc (0, stdout); } if (out_line) { if (lastnl < lim) { nlscan (beg); totalnl = add_count (totalnl, 1); lastnl = lim; } if (pending_sep) print_sep (sep); print_offset (totalnl, 4, line_num_color); pending_sep = 1; } if (out_byte) { uintmax_t pos = add_count (totalcc, beg - bufbeg); #if defined HAVE_DOS_FILE_CONTENTS pos = dossified_pos (pos); #endif if (pending_sep) print_sep (sep); print_offset (pos, 6, byte_num_color); pending_sep = 1; } if (pending_sep) { /* This assumes sep is one column wide. Try doing this any other way with Unicode (and its combining and wide characters) filenames and you're wasting your efforts. */ if (align_tabs) fputs ("\t\b", stdout); print_sep (sep); } } static const char * print_line_middle (const char *beg, const char *lim, const char *line_color, const char *match_color) { size_t match_size; size_t match_offset; const char *cur = beg; const char *mid = NULL; while (cur < lim && ((match_offset = execute (beg, lim - beg, &match_size, beg + (cur - beg))) != (size_t) -1)) { char const *b = beg + match_offset; /* Avoid matching the empty line at the end of the buffer. */ if (b == lim) break; /* Avoid hanging on grep --color "" foo */ if (match_size == 0) { /* Make minimal progress; there may be further non-empty matches. */ /* XXX - Could really advance by one whole multi-octet character. */ match_size = 1; if (!mid) mid = cur; } else { /* This function is called on a matching line only, but is it selected or rejected/context? */ if (only_matching) print_line_head (b, lim, (out_invert ? 
SEP_CHAR_REJECTED : SEP_CHAR_SELECTED)); else { pr_sgr_start (line_color); if (mid) { cur = mid; mid = NULL; } fwrite (cur, sizeof (char), b - cur, stdout); } pr_sgr_start_if (match_color); fwrite (b, sizeof (char), match_size, stdout); pr_sgr_end_if (match_color); if (only_matching) fputs ("\n", stdout); } cur = b + match_size; } if (only_matching) cur = lim; else if (mid) cur = mid; return cur; } static const char * print_line_tail (const char *beg, const char *lim, const char *line_color) { size_t eol_size; size_t tail_size; eol_size = (lim > beg && lim[-1] == eolbyte); eol_size += (lim - eol_size > beg && lim[-(1 + eol_size)] == '\r'); tail_size = lim - eol_size - beg; if (tail_size > 0) { pr_sgr_start (line_color); fwrite (beg, 1, tail_size, stdout); beg += tail_size; pr_sgr_end (line_color); } return beg; } static void prline (char const *beg, char const *lim, int sep) { int matching; const char *line_color; const char *match_color; if (!only_matching) print_line_head (beg, lim, sep); matching = (sep == SEP_CHAR_SELECTED) ^ !!out_invert; if (color_option) { line_color = (((sep == SEP_CHAR_SELECTED) ^ (out_invert && (color_option < 0))) ? selected_line_color : context_line_color); match_color = (sep == SEP_CHAR_SELECTED ? selected_match_color : context_match_color); } else line_color = match_color = NULL; /* Shouldn't be used. */ if ((only_matching && matching) || (color_option && (*line_color || *match_color))) { /* We already know that non-matching lines have no match (to colorize). */ if (matching && (only_matching || *match_color)) beg = print_line_middle (beg, lim, line_color, match_color); /* FIXME: this test may be removable. */ if (!only_matching && *line_color) beg = print_line_tail (beg, lim, line_color); } if (!only_matching && lim > beg) fwrite (beg, 1, lim - beg, stdout); if (ferror (stdout)) { write_error_seen = 1; error (EXIT_TROUBLE, 0, _("write error")); } lastout = lim; if (line_buffered) fflush (stdout); } /* Print pending lines of trailing context prior to LIM. Trailing context ends at the next matching line when OUTLEFT is 0. */ static void prpending (char const *lim) { if (!lastout) lastout = bufbeg; while (pending > 0 && lastout < lim) { char const *nl = memchr (lastout, eolbyte, lim - lastout); size_t match_size; --pending; if (outleft || ((execute (lastout, nl + 1 - lastout, &match_size, NULL) == (size_t) -1) == !out_invert)) prline (lastout, nl + 1, SEP_CHAR_REJECTED); else pending = 0; } } /* Print the lines between BEG and LIM. Deal with context crap. If NLINESP is non-null, store a count of lines between BEG and LIM. */ static void prtext (char const *beg, char const *lim, int *nlinesp) { /* Print the lines between BEG and LIM. Deal with context crap. If NLINESP is non-null, store a count of lines between BEG and LIM. */ static void prtext (char const *beg, char const *lim, intmax_t *nlinesp) { static int used; /* avoid printing SEP_STR_GROUP before any output */ char const *bp, *p; char eol = eolbyte; intmax_t i, n; if (!out_quiet && pending > 0) prpending (beg); /* Deal with leading context crap. */ bp = lastout ? lastout : bufbeg; for (i = 0; i < out_before; ++i) if (p > bp) do --p; while (p[-1] != eol); /* We print the SEP_STR_GROUP separator only if our output is discontiguous from the last output in the file. 
*/ if ((out_before || out_after) && used && p != lastout && group_separator) { pr_sgr_start_if (sep_color); fputs (group_separator, stdout); pr_sgr_end_if (sep_color); fputc ('\n', stdout); } while (p < beg) { char const *nl = memchr (p, eol, beg - p); nl++; prline (p, nl, SEP_CHAR_REJECTED); p = nl; } } if (nlinesp) { /* Caller wants a line count. */ for (n = 0; p < lim && n < outleft; n++) { char const *nl = memchr (p, eol, lim - p); nl++; if (!out_quiet) prline (p, nl, SEP_CHAR_SELECTED); p = nl; } *nlinesp = n; /* relying on it that this function is never called when outleft = 0. */ after_last_match = bufoffset - (buflim - p); } else if (!out_quiet) prline (beg, lim, SEP_CHAR_SELECTED); pending = out_quiet ? 0 : out_after; used = 1; } static size_t do_execute (char const *buf, size_t size, size_t *match_size, char const *start_ptr) { size_t result; const char *line_next; /* With the current implementation, using --ignore-case with a multi-byte character set is very inefficient when applied to a large buffer containing many matches. We can avoid much of the wasted effort by matching line-by-line. FIXME: this is just an ugly workaround, and it doesn't really belong here. Also, PCRE is always using this same per-line matching algorithm. Either we fix -i, or we should refactor this code---for example, we could add another function pointer to struct matcher to split the buffer passed to execute. It would perform the memchr if line-by-line matching is necessary, or just return buf + size otherwise. */ if (MB_CUR_MAX == 1 || !match_icase) return execute (buf, size, match_size, start_ptr); for (line_next = buf; line_next < buf + size; ) { const char *line_buf = line_next; const char *line_end = memchr (line_buf, eolbyte, (buf + size) - line_buf); if (line_end == NULL) line_next = line_end = buf + size; else line_next = line_end + 1; if (start_ptr && start_ptr >= line_end) continue; result = execute (line_buf, line_next - line_buf, match_size, start_ptr); if (result != (size_t) -1) return (line_buf - buf) + result; } return (size_t) -1; } /* Scan the specified portion of the buffer, matching lines (or between matching lines if OUT_INVERT is true). Return a count of lines printed. */ static int grepbuf (char const *beg, char const *lim) /* Scan the specified portion of the buffer, matching lines (or between matching lines if OUT_INVERT is true). Return a count of lines printed. */ static intmax_t grepbuf (char const *beg, char const *lim) { intmax_t nlines, n; char const *p; size_t match_offset; size_t match_size; { char const *b = p + match_offset; char const *endp = b + match_size; /* Avoid matching the empty line at the end of the buffer. */ if (b == lim) break; if (!out_invert) { prtext (b, endp, (int *) 0); nlines++; break; if (!out_invert) { prtext (b, endp, NULL); nlines++; outleft--; if (!outleft || done_on_match) } } else if (p < b) { prtext (p, b, &n); nlines += n; outleft -= n; if (!outleft) return nlines; } p = endp; } if (out_invert && p < lim) { prtext (p, lim, &n); nlines += n; outleft -= n; } return nlines; } /* Search a given file. Normally, return a count of lines printed; but if the file is a directory and we search it recursively, then return -2 if there was a match, and -1 otherwise. */ static int grep (int fd, char const *file, struct stats *stats) /* Search a given file. Normally, return a count of lines printed; but if the file is a directory and we search it recursively, then return -2 if there was a match, and -1 otherwise. 
*/ static intmax_t grep (int fd, char const *file, struct stats *stats) { intmax_t nlines, i; int not_text; size_t residue, save; char oldc; return 0; if (file && directories == RECURSE_DIRECTORIES && S_ISDIR (stats->stat.st_mode)) { /* Close fd now, so that we don't open a lot of file descriptors when we recurse deeply. */ if (close (fd) != 0) suppressible_error (file, errno); return grepdir (file, stats) - 2; } totalcc = 0; lastout = 0; totalnl = 0; outleft = max_count; after_last_match = 0; pending = 0; nlines = 0; residue = 0; save = 0; if (! fillbuf (save, stats)) { suppressible_error (filename, errno); return 0; } not_text = (((binary_files == BINARY_BINARY_FILES && !out_quiet) || binary_files == WITHOUT_MATCH_BINARY_FILES) && memchr (bufbeg, eol ? '\0' : '\200', buflim - bufbeg)); if (not_text && binary_files == WITHOUT_MATCH_BINARY_FILES) return 0; done_on_match += not_text; out_quiet += not_text; for (;;) { lastnl = bufbeg; if (lastout) lastout = bufbeg; beg = bufbeg + save; /* no more data to scan (eof) except for maybe a residue -> break */ if (beg == buflim) break; /* Determine new residue (the length of an incomplete line at the end of the buffer, 0 means there is no incomplete last line). */ oldc = beg[-1]; beg[-1] = eol; for (lim = buflim; lim[-1] != eol; lim--) continue; beg[-1] = oldc; if (lim == beg) lim = beg - residue; beg -= residue; residue = buflim - lim; if (beg < lim) { if (outleft) nlines += grepbuf (beg, lim); if (pending) prpending (lim); if ((!outleft && !pending) || (nlines && done_on_match && !out_invert)) goto finish_grep; } /* The last OUT_BEFORE lines at the end of the buffer will be needed as leading context if there is a matching line at the begin of the next data. Make beg point to their begin. */ i = 0; beg = lim; while (i < out_before && beg > bufbeg && beg != lastout) { ++i; do --beg; while (beg[-1] != eol); } /* detect if leading context is discontinuous from last printed line. */ if (beg != lastout) lastout = 0; /* Handle some details and read more data to scan. */ save = residue + lim - beg; if (out_byte) totalcc = add_count (totalcc, buflim - bufbeg - save); if (out_line) nlscan (beg); if (! fillbuf (save, stats)) { suppressible_error (filename, errno); goto finish_grep; } } if (residue) { *buflim++ = eol; if (outleft) nlines += grepbuf (bufbeg + save - residue, buflim); if (pending) prpending (buflim); } finish_grep: done_on_match -= not_text; out_quiet -= not_text; if ((not_text & ~out_quiet) && nlines != 0) printf (_("Binary file %s matches\n"), filename); return nlines; } static int grepfile (char const *file, struct stats *stats) { int desc; int count; int status; grepfile (char const *file, struct stats *stats) { int desc; intmax_t count; int status; filename = (file ? file : label ? label : _("(standard input)")); /* Don't open yet, since that might have side effects on a device. */ desc = -1; } else { /* When skipping directories, don't worry about directories that can't be opened. */ desc = open (file, O_RDONLY); if (desc < 0 && directories != SKIP_DIRECTORIES) { suppressible_error (file, errno); return 1; } } if (desc < 0 ? 
stat (file, &stats->stat) != 0 : fstat (desc, &stats->stat) != 0) { suppressible_error (filename, errno); if (file) close (desc); return 1; } if ((directories == SKIP_DIRECTORIES && S_ISDIR (stats->stat.st_mode)) || (devices == SKIP_DEVICES && (S_ISCHR (stats->stat.st_mode) || S_ISBLK (stats->stat.st_mode) || S_ISSOCK (stats->stat.st_mode) || S_ISFIFO (stats->stat.st_mode)))) { if (file) close (desc); return 1; } /* If there is a regular file on stdout and the current file refers to the same i-node, we have to report the problem and skip it. Otherwise when matching lines from some other input reach the disk before we open this file, we can end up reading and matching those lines and appending them to the file from which we're reading. Then we'd have what appears to be an infinite loop that'd terminate only upon filling the output file system or reaching a quota. However, there is no risk of an infinite loop if grep is generating no output, i.e., with --silent, --quiet, -q. Similarly, with any of these: --max-count=N (-m) (for N >= 2) --files-with-matches (-l) --files-without-match (-L) there is no risk of trouble. For --max-count=1, grep stops after printing the first match, so there is no risk of malfunction. But even --max-count=2, with input==output, while there is no risk of infloop, there is a race condition that could result in "alternate" output. */ if (!out_quiet && list_files == 0 && 1 < max_count && S_ISREG (out_stat.st_mode) && out_stat.st_ino && SAME_INODE (stats->stat, out_stat)) { if (! suppress_errors) error (0, 0, _("input file %s is also the output"), quote (filename)); errseen = 1; if (file) close (desc); return 1; } if (desc < 0) { desc = open (file, O_RDONLY); if (desc < 0) { suppressible_error (file, errno); return 1; } } #if defined SET_BINARY /* Set input to binary mode. Pipes are simulated with files on DOS, so this includes the case of "foo | grep bar". */ if (!isatty (desc)) SET_BINARY (desc); #endif count = grep (desc, file, stats); if (count < 0) status = count + 2; else { if (count_matches) { if (out_file) { print_filename (); if (filename_mask) print_sep (SEP_CHAR_SELECTED); else fputc (0, stdout); } printf ("%d\n", count); } else fputc (0, stdout); } printf ("%" PRIdMAX "\n", count); } status = !count; if (! file) { off_t required_offset = outleft ? bufoffset : after_last_match; if (required_offset != bufoffset && lseek (desc, required_offset, SEEK_SET) < 0 && S_ISREG (stats->stat.st_mode)) suppressible_error (filename, errno); } else while (close (desc) != 0) if (errno != EINTR) { suppressible_error (file, errno); break; } }
164,824
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video) { frame_flags_ &= ~(VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF); if (droppable_nframes_ > 0 && (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) { for (unsigned int i = 0; i < droppable_nframes_; ++i) { if (droppable_frames_[i] == video->frame()) { std::cout << " Encoding droppable frame: " << droppable_frames_[i] << "\n"; frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF); return; } } } } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video) { // // Frame flags and layer id for temporal layers. // For two layers, test pattern is: // 1 3 // 0 2 ..... // LAST is updated on base/layer 0, GOLDEN updated on layer 1. // Non-zero pattern_switch parameter means pattern will switch to // not using LAST for frame_num >= pattern_switch. int SetFrameFlags(int frame_num, int num_temp_layers, int pattern_switch) { int frame_flags = 0; if (num_temp_layers == 2) { if (frame_num % 2 == 0) { if (frame_num < pattern_switch || pattern_switch == 0) { // Layer 0: predict from LAST and ARF, update LAST. frame_flags = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; } else { // Layer 0: predict from GF and ARF, update GF. frame_flags = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF; } } else { if (frame_num < pattern_switch || pattern_switch == 0) { // Layer 1: predict from L, GF, and ARF, update GF. frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; } else { // Layer 1: predict from GF and ARF, update GF. frame_flags = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF; } } } return frame_flags; } virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video, ::libvpx_test::Encoder *encoder) { frame_flags_ &= ~(VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF); // For temporal layer case. if (cfg_.ts_number_layers > 1) { frame_flags_ = SetFrameFlags(video->frame(), cfg_.ts_number_layers, pattern_switch_); for (unsigned int i = 0; i < droppable_nframes_; ++i) { if (droppable_frames_[i] == video->frame()) { std::cout << "Encoding droppable frame: " << droppable_frames_[i] << "\n"; } } } else { if (droppable_nframes_ > 0 && (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) { for (unsigned int i = 0; i < droppable_nframes_; ++i) { if (droppable_frames_[i] == video->frame()) { std::cout << "Encoding droppable frame: " << droppable_frames_[i] << "\n"; frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF); return; } } } } }
174,542
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: void PlatformSensorProviderBase::FreeResourcesIfNeeded() { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); if (sensor_map_.empty() && requests_map_.empty()) { FreeResources(); shared_buffer_handle_.reset(); } } Commit Message: android: Fix sensors in device service. This patch fixes a bug that prevented more than one sensor data to be available at once when using the device motion/orientation API. The issue was introduced by this other patch [1] which fixed some security-related issues in the way shared memory region handles are managed throughout Chromium (more details at https://crbug.com/789959). The device service´s sensor implementation doesn´t work correctly because it assumes it is possible to create a writable mapping of a given shared memory region at any time. This assumption is not correct on Android, once an Ashmem region has been turned read-only, such mappings are no longer possible. To fix the implementation, this CL changes the following: - PlatformSensor used to require moving a mojo::ScopedSharedBufferMapping into the newly-created instance. Said mapping being owned by and destroyed with the PlatformSensor instance. With this patch, the constructor instead takes a single pointer to the corresponding SensorReadingSharedBuffer, i.e. the area in memory where the sensor-specific reading data is located, and can be either updated or read-from. Note that the PlatformSensor does not own the mapping anymore. - PlatformSensorProviderBase holds the *single* writable mapping that is used to store all SensorReadingSharedBuffer buffers. It is created just after the region itself, and thus can be used even after the region's access mode has been changed to read-only. Addresses within the mapping will be passed to PlatformSensor constructors, computed from the mapping's base address plus a sensor-specific offset. The mapping is now owned by the PlatformSensorProviderBase instance. Note that, security-wise, nothing changes, because all mojo::ScopedSharedBufferMapping before the patch actually pointed to the same writable-page in memory anyway. Since unit or integration tests didn't catch the regression when [1] was submitted, this patch was tested manually by running a newly-built Chrome apk in the Android emulator and on a real device running Android O. [1] https://chromium-review.googlesource.com/c/chromium/src/+/805238 BUG=805146 [email protected],[email protected],[email protected],[email protected] Change-Id: I7d60a1cad278f48c361d2ece5a90de10eb082b44 Reviewed-on: https://chromium-review.googlesource.com/891180 Commit-Queue: David Turner <[email protected]> Reviewed-by: Reilly Grant <[email protected]> Reviewed-by: Matthew Cary <[email protected]> Reviewed-by: Alexandr Ilin <[email protected]> Cr-Commit-Position: refs/heads/master@{#532607} CWE ID: CWE-732
void PlatformSensorProviderBase::FreeResourcesIfNeeded() { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); if (sensor_map_.empty() && requests_map_.empty()) { FreeResources(); shared_buffer_mapping_.reset(); shared_buffer_handle_.reset(); } }
172,840
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: static void sig_server_connect_copy(SERVER_CONNECT_REC **dest, IRC_SERVER_CONNECT_REC *src) { IRC_SERVER_CONNECT_REC *rec; g_return_if_fail(dest != NULL); if (!IS_IRC_SERVER_CONNECT(src)) return; rec = g_new0(IRC_SERVER_CONNECT_REC, 1); rec->chat_type = IRC_PROTOCOL; rec->max_cmds_at_once = src->max_cmds_at_once; rec->cmd_queue_speed = src->cmd_queue_speed; rec->max_query_chans = src->max_query_chans; rec->max_kicks = src->max_kicks; rec->max_modes = src->max_modes; rec->max_msgs = src->max_msgs; rec->max_whois = src->max_whois; rec->usermode = g_strdup(src->usermode); rec->alternate_nick = g_strdup(src->alternate_nick); rec->sasl_mechanism = src->sasl_mechanism; rec->sasl_username = src->sasl_username; rec->sasl_password = src->sasl_password; *dest = (SERVER_CONNECT_REC *) rec; } Commit Message: Merge pull request #1058 from ailin-nemui/sasl-reconnect copy sasl username and password values CWE ID: CWE-416
static void sig_server_connect_copy(SERVER_CONNECT_REC **dest, IRC_SERVER_CONNECT_REC *src) { IRC_SERVER_CONNECT_REC *rec; g_return_if_fail(dest != NULL); if (!IS_IRC_SERVER_CONNECT(src)) return; rec = g_new0(IRC_SERVER_CONNECT_REC, 1); rec->chat_type = IRC_PROTOCOL; rec->max_cmds_at_once = src->max_cmds_at_once; rec->cmd_queue_speed = src->cmd_queue_speed; rec->max_query_chans = src->max_query_chans; rec->max_kicks = src->max_kicks; rec->max_modes = src->max_modes; rec->max_msgs = src->max_msgs; rec->max_whois = src->max_whois; rec->usermode = g_strdup(src->usermode); rec->alternate_nick = g_strdup(src->alternate_nick); rec->sasl_mechanism = src->sasl_mechanism; rec->sasl_username = g_strdup(src->sasl_username); rec->sasl_password = g_strdup(src->sasl_password); *dest = (SERVER_CONNECT_REC *) rec; }
169,643
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want the response to contain just the fixed code, with no explanation.
Code: static Image *ReadTIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { const char *option; float *chromaticity, x_position, y_position, x_resolution, y_resolution; Image *image; int tiff_status; MagickBooleanType status; MagickSizeType number_pixels; QuantumInfo *quantum_info; QuantumType quantum_type; register ssize_t i; size_t pad; ssize_t y; TIFF *tiff; TIFFMethodType method; uint16 compress_tag, bits_per_sample, endian, extra_samples, interlace, max_sample_value, min_sample_value, orientation, pages, photometric, *sample_info, sample_format, samples_per_pixel, units, value; uint32 height, rows_per_strip, width; unsigned char *pixels; /* Open image. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } (void) SetMagickThreadValue(tiff_exception,exception); tiff=TIFFClientOpen(image->filename,"rb",(thandle_t) image,TIFFReadBlob, TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob, TIFFUnmapBlob); if (tiff == (TIFF *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } if (image_info->number_scenes != 0) { /* Generate blank images for subimage specification (e.g. image.tif[4]. We need to check the number of directores because it is possible that the subimage(s) are stored in the photoshop profile. */ if (image_info->scene < (size_t)TIFFNumberOfDirectories(tiff)) { for (i=0; i < (ssize_t) image_info->scene; i++) { status=TIFFReadDirectory(tiff) != 0 ? 
MagickTrue : MagickFalse; if (status == MagickFalse) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); } } } do { DisableMSCWarning(4127) if (0 && (image_info->verbose != MagickFalse)) TIFFPrintDirectory(tiff,stdout,MagickFalse); RestoreMSCWarning if ((TIFFGetField(tiff,TIFFTAG_IMAGEWIDTH,&width) != 1) || (TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&height) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_COMPRESSION,&compress_tag) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PLANARCONFIG,&interlace) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,&samples_per_pixel) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,&bits_per_sample) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLEFORMAT,&sample_format) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MINSAMPLEVALUE,&min_sample_value) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MAXSAMPLEVALUE,&max_sample_value) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PHOTOMETRIC,&photometric) != 1)) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (sample_format == SAMPLEFORMAT_IEEEFP) (void) SetImageProperty(image,"quantum:format","floating-point"); switch (photometric) { case PHOTOMETRIC_MINISBLACK: { (void) SetImageProperty(image,"tiff:photometric","min-is-black"); break; } case PHOTOMETRIC_MINISWHITE: { (void) SetImageProperty(image,"tiff:photometric","min-is-white"); break; } case PHOTOMETRIC_PALETTE: { (void) SetImageProperty(image,"tiff:photometric","palette"); break; } case PHOTOMETRIC_RGB: { (void) SetImageProperty(image,"tiff:photometric","RGB"); break; } case PHOTOMETRIC_CIELAB: { (void) SetImageProperty(image,"tiff:photometric","CIELAB"); break; } case PHOTOMETRIC_LOGL: { (void) SetImageProperty(image,"tiff:photometric","CIE Log2(L)"); break; } case PHOTOMETRIC_LOGLUV: { (void) SetImageProperty(image,"tiff:photometric","LOGLUV"); break; } #if defined(PHOTOMETRIC_MASK) case PHOTOMETRIC_MASK: { (void) SetImageProperty(image,"tiff:photometric","MASK"); break; } #endif case PHOTOMETRIC_SEPARATED: { (void) SetImageProperty(image,"tiff:photometric","separated"); break; } case PHOTOMETRIC_YCBCR: { (void) SetImageProperty(image,"tiff:photometric","YCBCR"); break; } default: { (void) SetImageProperty(image,"tiff:photometric","unknown"); break; } } if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %ux%u", (unsigned int) width,(unsigned int) height); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Interlace: %u", interlace); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Bits per sample: %u",bits_per_sample); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Min sample value: %u",min_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Max sample value: %u",max_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Photometric " "interpretation: %s",GetImageProperty(image,"tiff:photometric")); } image->columns=(size_t) width; image->rows=(size_t) height; image->depth=(size_t) bits_per_sample; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if (image->debug != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(),"Image depth: %.20g", (double) image->depth); image->endian=MSBEndian; if (endian == FILLORDER_LSB2MSB) image->endian=LSBEndian; #if defined(MAGICKCORE_HAVE_TIFFISBIGENDIAN) if (TIFFIsBigEndian(tiff) == 0) { (void) SetImageProperty(image,"tiff:endian","lsb"); image->endian=LSBEndian; } else { (void) SetImageProperty(image,"tiff:endian","msb"); image->endian=MSBEndian; } #endif if ((photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) SetImageColorspace(image,GRAYColorspace); if (photometric == PHOTOMETRIC_SEPARATED) SetImageColorspace(image,CMYKColorspace); if (photometric == PHOTOMETRIC_CIELAB) SetImageColorspace(image,LabColorspace); TIFFGetProfiles(tiff,image,image_info->ping); TIFFGetProperties(tiff,image); option=GetImageOption(image_info,"tiff:exif-properties"); if ((option == (const char *) NULL) || (IsMagickTrue(option) != MagickFalse)) TIFFGetEXIFProperties(tiff,image); if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XRESOLUTION,&x_resolution) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YRESOLUTION,&y_resolution) == 1)) { image->x_resolution=x_resolution; image->y_resolution=y_resolution; } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_RESOLUTIONUNIT,&units) == 1) { if (units == RESUNIT_INCH) image->units=PixelsPerInchResolution; if (units == RESUNIT_CENTIMETER) image->units=PixelsPerCentimeterResolution; } if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XPOSITION,&x_position) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YPOSITION,&y_position) == 1)) { image->page.x=(ssize_t) ceil(x_position*image->x_resolution-0.5); image->page.y=(ssize_t) ceil(y_position*image->y_resolution-0.5); } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_ORIENTATION,&orientation) == 1) image->orientation=(OrientationType) orientation; if (TIFFGetField(tiff,TIFFTAG_WHITEPOINT,&chromaticity) == 1) { if (chromaticity != (float *) NULL) { image->chromaticity.white_point.x=chromaticity[0]; image->chromaticity.white_point.y=chromaticity[1]; } } if (TIFFGetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,&chromaticity) == 1) { if (chromaticity != (float *) NULL) { image->chromaticity.red_primary.x=chromaticity[0]; image->chromaticity.red_primary.y=chromaticity[1]; image->chromaticity.green_primary.x=chromaticity[2]; image->chromaticity.green_primary.y=chromaticity[3]; image->chromaticity.blue_primary.x=chromaticity[4]; image->chromaticity.blue_primary.y=chromaticity[5]; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) == 0)) { TIFFClose(tiff); ThrowReaderException(CoderError,"CompressNotSupported"); } #endif switch (compress_tag) { case COMPRESSION_NONE: image->compression=NoCompression; break; case COMPRESSION_CCITTFAX3: image->compression=FaxCompression; break; case COMPRESSION_CCITTFAX4: image->compression=Group4Compression; break; case COMPRESSION_JPEG: { image->compression=JPEGCompression; #if defined(JPEG_SUPPORT) { char sampling_factor[MaxTextExtent]; int tiff_status; uint16 horizontal, vertical; tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_YCBCRSUBSAMPLING, &horizontal,&vertical); if (tiff_status == 1) { (void) FormatLocaleString(sampling_factor,MaxTextExtent,"%dx%d", horizontal,vertical); (void) SetImageProperty(image,"jpeg:sampling-factor", sampling_factor); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling Factors: %s",sampling_factor); } } #endif break; } case COMPRESSION_OJPEG: image->compression=JPEGCompression; break; #if 
defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: image->compression=LZMACompression; break; #endif case COMPRESSION_LZW: image->compression=LZWCompression; break; case COMPRESSION_DEFLATE: image->compression=ZipCompression; break; case COMPRESSION_ADOBE_DEFLATE: image->compression=ZipCompression; break; default: image->compression=RLECompression; break; } /* Allocate memory for the image and pixel buffer. */ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } if (sample_format == SAMPLEFORMAT_UINT) status=SetQuantumFormat(image,quantum_info,UnsignedQuantumFormat); if (sample_format == SAMPLEFORMAT_INT) status=SetQuantumFormat(image,quantum_info,SignedQuantumFormat); if (sample_format == SAMPLEFORMAT_IEEEFP) status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) { TIFFClose(tiff); quantum_info=DestroyQuantumInfo(quantum_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } status=MagickTrue; switch (photometric) { case PHOTOMETRIC_MINISBLACK: { quantum_info->min_is_white=MagickFalse; break; } case PHOTOMETRIC_MINISWHITE: { quantum_info->min_is_white=MagickTrue; break; } default: break; } tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_EXTRASAMPLES,&extra_samples, &sample_info); if (tiff_status == 1) { (void) SetImageProperty(image,"tiff:alpha","unspecified"); if (extra_samples == 0) { if ((samples_per_pixel == 4) && (photometric == PHOTOMETRIC_RGB)) image->matte=MagickTrue; } else for (i=0; i < extra_samples; i++) { image->matte=MagickTrue; if (sample_info[i] == EXTRASAMPLE_ASSOCALPHA) { SetQuantumAlphaType(quantum_info,DisassociatedQuantumAlpha); (void) SetImageProperty(image,"tiff:alpha","associated"); } else if (sample_info[i] == EXTRASAMPLE_UNASSALPHA) (void) SetImageProperty(image,"tiff:alpha","unassociated"); } } if ((photometric == PHOTOMETRIC_PALETTE) && (pow(2.0,1.0*bits_per_sample) <= MaxColormapSize)) { size_t colors; colors=(size_t) GetQuantumRange(bits_per_sample)+1; if (AcquireImageColormap(image,colors) == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_PAGENUMBER,&value,&pages) == 1) image->scene=value; if (image->storage_class == PseudoClass) { int tiff_status; size_t range; uint16 *blue_colormap, *green_colormap, *red_colormap; /* Initialize colormap. 
*/ tiff_status=TIFFGetField(tiff,TIFFTAG_COLORMAP,&red_colormap, &green_colormap,&blue_colormap); if (tiff_status == 1) { if ((red_colormap != (uint16 *) NULL) && (green_colormap != (uint16 *) NULL) && (blue_colormap != (uint16 *) NULL)) { range=255; /* might be old style 8-bit colormap */ for (i=0; i < (ssize_t) image->colors; i++) if ((red_colormap[i] >= 256) || (green_colormap[i] >= 256) || (blue_colormap[i] >= 256)) { range=65535; break; } for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ClampToQuantum(((double) QuantumRange*red_colormap[i])/range); image->colormap[i].green=ClampToQuantum(((double) QuantumRange*green_colormap[i])/range); image->colormap[i].blue=ClampToQuantum(((double) QuantumRange*blue_colormap[i])/range); } } } if (image->matte == MagickFalse) image->depth=GetImageDepth(image,exception); } if (image_info->ping != MagickFalse) { if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) { quantum_info=DestroyQuantumInfo(quantum_info); break; } goto next_tiff_frame; } method=ReadGenericMethod; if (TIFFGetField(tiff,TIFFTAG_ROWSPERSTRIP,&rows_per_strip) == 1) { char value[MaxTextExtent]; method=ReadStripMethod; (void) FormatLocaleString(value,MaxTextExtent,"%u",(unsigned int) rows_per_strip); (void) SetImageProperty(image,"tiff:rows-per-strip",value); } if ((samples_per_pixel >= 2) && (interlace == PLANARCONFIG_CONTIG)) method=ReadRGBAMethod; if ((samples_per_pixel >= 2) && (interlace == PLANARCONFIG_SEPARATE)) method=ReadCMYKAMethod; if ((photometric != PHOTOMETRIC_RGB) && (photometric != PHOTOMETRIC_CIELAB) && (photometric != PHOTOMETRIC_SEPARATED)) method=ReadGenericMethod; if (image->storage_class == PseudoClass) method=ReadSingleSampleMethod; if ((photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) method=ReadSingleSampleMethod; if ((photometric != PHOTOMETRIC_SEPARATED) && (interlace == PLANARCONFIG_SEPARATE) && (bits_per_sample < 64)) method=ReadGenericMethod; if (image->compression == JPEGCompression) method=GetJPEGMethod(image,tiff,photometric,bits_per_sample, samples_per_pixel); if (compress_tag == COMPRESSION_JBIG) method=ReadStripMethod; if (TIFFIsTiled(tiff) != MagickFalse) method=ReadTileMethod; quantum_info->endian=LSBEndian; quantum_type=RGBQuantum; pixels=GetQuantumPixels(quantum_info); switch (method) { case ReadSingleSampleMethod: { /* Convert TIFF image to PseudoClass MIFF image. */ quantum_type=IndexQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-1,0); if (image->matte != MagickFalse) { if (image->storage_class != PseudoClass) { quantum_type=samples_per_pixel == 1 ? 
AlphaQuantum : GrayAlphaQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-2,0); } else { quantum_type=IndexAlphaQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-2,0); } } else if (image->storage_class != PseudoClass) { quantum_type=GrayQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-1,0); } status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >> 3)); if (status == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { int status; register PixelPacket *magick_restrict q; status=TIFFReadPixels(tiff,bits_per_sample,0,y,(char *) pixels); if (status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadRGBAMethod: { /* Convert TIFF image to DirectClass MIFF image. */ pad=(size_t) MagickMax((size_t) samples_per_pixel-3,0); quantum_type=RGBQuantum; if (image->matte != MagickFalse) { quantum_type=RGBAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); } if (image->colorspace == CMYKColorspace) { pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); quantum_type=CMYKQuantum; if (image->matte != MagickFalse) { quantum_type=CMYKAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-5,0); } } status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >> 3)); if (status == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { int status; register PixelPacket *magick_restrict q; status=TIFFReadPixels(tiff,bits_per_sample,0,y,(char *) pixels); if (status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadCMYKAMethod: { /* Convert TIFF image to DirectClass MIFF image. 
*/ for (i=0; i < (ssize_t) samples_per_pixel; i++) { for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; int status; status=TIFFReadPixels(tiff,bits_per_sample,(tsample_t) i,y,(char *) pixels); if (status == -1) break; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; if (image->colorspace != CMYKColorspace) switch (i) { case 0: quantum_type=RedQuantum; break; case 1: quantum_type=GreenQuantum; break; case 2: quantum_type=BlueQuantum; break; case 3: quantum_type=AlphaQuantum; break; default: quantum_type=UndefinedQuantum; break; } else switch (i) { case 0: quantum_type=CyanQuantum; break; case 1: quantum_type=MagentaQuantum; break; case 2: quantum_type=YellowQuantum; break; case 3: quantum_type=BlackQuantum; break; case 4: quantum_type=AlphaQuantum; break; default: quantum_type=UndefinedQuantum; break; } (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadYCCKMethod: { pixels=GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { int status; register IndexPacket *indexes; register PixelPacket *magick_restrict q; register ssize_t x; unsigned char *p; status=TIFFReadPixels(tiff,bits_per_sample,0,y,(char *) pixels); if (status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); p=pixels; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,ScaleCharToQuantum(ClampYCC((double) *p+ (1.402*(double) *(p+2))-179.456))); SetPixelMagenta(q,ScaleCharToQuantum(ClampYCC((double) *p- (0.34414*(double) *(p+1))-(0.71414*(double ) *(p+2))+ 135.45984))); SetPixelYellow(q,ScaleCharToQuantum(ClampYCC((double) *p+ (1.772*(double) *(p+1))-226.816))); SetPixelBlack(indexes+x,ScaleCharToQuantum((unsigned char)*(p+3))); q++; p+=4; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadStripMethod: { register uint32 *p; /* Convert stripped TIFF image to DirectClass MIFF image. 
*/ i=0; p=(uint32 *) NULL; for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; if (i == 0) { if (TIFFReadRGBAStrip(tiff,(tstrip_t) y,(uint32 *) pixels) == 0) break; i=(ssize_t) MagickMin((ssize_t) rows_per_strip,(ssize_t) image->rows-y); } i--; p=((uint32 *) pixels)+image->columns*i; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) (TIFFGetR(*p)))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) (TIFFGetG(*p)))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) (TIFFGetB(*p)))); if (image->matte != MagickFalse) SetPixelOpacity(q,ScaleCharToQuantum((unsigned char) (TIFFGetA(*p)))); p++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadTileMethod: { register uint32 *p; uint32 *tile_pixels, columns, rows; /* Convert tiled TIFF image to DirectClass MIFF image. */ if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) || (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1)) { TIFFClose(tiff); ThrowReaderException(CoderError,"ImageIsNotTiled"); } (void) SetImageStorageClass(image,DirectClass); number_pixels=(MagickSizeType) columns*rows; if ((number_pixels*sizeof(uint32)) != (MagickSizeType) ((size_t) (number_pixels*sizeof(uint32)))) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } tile_pixels=(uint32 *) AcquireQuantumMemory(number_pixels, sizeof(*tile_pixels)); if (tile_pixels == (uint32 *) NULL) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } for (y=0; y < (ssize_t) image->rows; y+=rows) { PixelPacket *tile; register ssize_t x; register PixelPacket *magick_restrict q; size_t columns_remaining, rows_remaining; rows_remaining=image->rows-y; if ((ssize_t) (y+rows) < (ssize_t) image->rows) rows_remaining=rows; tile=QueueAuthenticPixels(image,0,y,image->columns,rows_remaining, exception); if (tile == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x+=columns) { size_t column, row; if (TIFFReadRGBATile(tiff,(uint32) x,(uint32) y,tile_pixels) == 0) break; columns_remaining=image->columns-x; if ((ssize_t) (x+columns) < (ssize_t) image->columns) columns_remaining=columns; p=tile_pixels+(rows-rows_remaining)*columns; q=tile+(image->columns*(rows_remaining-1)+x); for (row=rows_remaining; row > 0; row--) { if (image->matte != MagickFalse) for (column=columns_remaining; column > 0; column--) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) TIFFGetR(*p))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) TIFFGetG(*p))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) TIFFGetB(*p))); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) TIFFGetA(*p))); q++; p++; } else for (column=columns_remaining; column > 0; column--) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) TIFFGetR(*p))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) TIFFGetG(*p))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) TIFFGetB(*p))); q++; p++; } p+=columns-columns_remaining; q-=(image->columns+columns_remaining); } } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) 
break; } } tile_pixels=(uint32 *) RelinquishMagickMemory(tile_pixels); break; } case ReadGenericMethod: default: { MemoryInfo *pixel_info; register uint32 *p; uint32 *pixels; /* Convert TIFF image to DirectClass MIFF image. */ number_pixels=(MagickSizeType) image->columns*image->rows; if ((number_pixels*sizeof(uint32)) != (MagickSizeType) ((size_t) (number_pixels*sizeof(uint32)))) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixel_info=AcquireVirtualMemory(image->columns,image->rows* sizeof(uint32)); if (pixel_info == (MemoryInfo *) NULL) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(uint32 *) GetVirtualMemoryBlob(pixel_info); (void) TIFFReadRGBAImage(tiff,(uint32) image->columns,(uint32) image->rows,(uint32 *) pixels,0); /* Convert image to DirectClass pixel packets. */ p=pixels+number_pixels-1; for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; q+=image->columns-1; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) TIFFGetR(*p))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) TIFFGetG(*p))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) TIFFGetB(*p))); if (image->matte != MagickFalse) SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) TIFFGetA(*p))); p--; q--; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } pixel_info=RelinquishVirtualMemory(pixel_info); break; } } SetQuantumImageType(image,quantum_type); next_tiff_frame: quantum_info=DestroyQuantumInfo(quantum_info); if (photometric == PHOTOMETRIC_CIELAB) DecodeLabImage(image,exception); if ((photometric == PHOTOMETRIC_LOGL) || (photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) { image->type=GrayscaleType; if (bits_per_sample == 1) image->type=BilevelType; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse; if (status != MagickFalse) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,image->scene-1, image->scene); if (status == MagickFalse) break; } } while (status != MagickFalse); TIFFClose(tiff); TIFFReadPhotoshopLayers(image,image_info,exception); if (image_info->number_scenes != 0) { if (image_info->scene >= GetImageListLength(image)) { /* Subimage was not found in the Photoshop layer */ image = DestroyImageList(image); return((Image *)NULL); } } return(GetFirstImageInList(image)); } Commit Message: ... CWE ID: CWE-119
static Image *ReadTIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { const char *option; float *chromaticity, x_position, y_position, x_resolution, y_resolution; Image *image; int tiff_status; MagickBooleanType status; MagickSizeType number_pixels; QuantumInfo *quantum_info; QuantumType quantum_type; register ssize_t i; size_t pad; ssize_t y; TIFF *tiff; TIFFMethodType method; uint16 compress_tag, bits_per_sample, endian, extra_samples, interlace, max_sample_value, min_sample_value, orientation, pages, photometric, *sample_info, sample_format, samples_per_pixel, units, value; uint32 height, rows_per_strip, width; unsigned char *pixels; /* Open image. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } (void) SetMagickThreadValue(tiff_exception,exception); tiff=TIFFClientOpen(image->filename,"rb",(thandle_t) image,TIFFReadBlob, TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob, TIFFUnmapBlob); if (tiff == (TIFF *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } if (image_info->number_scenes != 0) { /* Generate blank images for subimage specification (e.g. image.tif[4]. We need to check the number of directores because it is possible that the subimage(s) are stored in the photoshop profile. */ if (image_info->scene < (size_t)TIFFNumberOfDirectories(tiff)) { for (i=0; i < (ssize_t) image_info->scene; i++) { status=TIFFReadDirectory(tiff) != 0 ? 
MagickTrue : MagickFalse; if (status == MagickFalse) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { TIFFClose(tiff); image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); } } } do { DisableMSCWarning(4127) if (0 && (image_info->verbose != MagickFalse)) TIFFPrintDirectory(tiff,stdout,MagickFalse); RestoreMSCWarning if ((TIFFGetField(tiff,TIFFTAG_IMAGEWIDTH,&width) != 1) || (TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&height) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_COMPRESSION,&compress_tag) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PLANARCONFIG,&interlace) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,&samples_per_pixel) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,&bits_per_sample) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLEFORMAT,&sample_format) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MINSAMPLEVALUE,&min_sample_value) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_MAXSAMPLEVALUE,&max_sample_value) != 1) || (TIFFGetFieldDefaulted(tiff,TIFFTAG_PHOTOMETRIC,&photometric) != 1)) { TIFFClose(tiff); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (sample_format == SAMPLEFORMAT_IEEEFP) (void) SetImageProperty(image,"quantum:format","floating-point"); switch (photometric) { case PHOTOMETRIC_MINISBLACK: { (void) SetImageProperty(image,"tiff:photometric","min-is-black"); break; } case PHOTOMETRIC_MINISWHITE: { (void) SetImageProperty(image,"tiff:photometric","min-is-white"); break; } case PHOTOMETRIC_PALETTE: { (void) SetImageProperty(image,"tiff:photometric","palette"); break; } case PHOTOMETRIC_RGB: { (void) SetImageProperty(image,"tiff:photometric","RGB"); break; } case PHOTOMETRIC_CIELAB: { (void) SetImageProperty(image,"tiff:photometric","CIELAB"); break; } case PHOTOMETRIC_LOGL: { (void) SetImageProperty(image,"tiff:photometric","CIE Log2(L)"); break; } case PHOTOMETRIC_LOGLUV: { (void) SetImageProperty(image,"tiff:photometric","LOGLUV"); break; } #if defined(PHOTOMETRIC_MASK) case PHOTOMETRIC_MASK: { (void) SetImageProperty(image,"tiff:photometric","MASK"); break; } #endif case PHOTOMETRIC_SEPARATED: { (void) SetImageProperty(image,"tiff:photometric","separated"); break; } case PHOTOMETRIC_YCBCR: { (void) SetImageProperty(image,"tiff:photometric","YCBCR"); break; } default: { (void) SetImageProperty(image,"tiff:photometric","unknown"); break; } } if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %ux%u", (unsigned int) width,(unsigned int) height); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Interlace: %u", interlace); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Bits per sample: %u",bits_per_sample); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Min sample value: %u",min_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Max sample value: %u",max_sample_value); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Photometric " "interpretation: %s",GetImageProperty(image,"tiff:photometric")); } image->columns=(size_t) width; image->rows=(size_t) height; image->depth=(size_t) bits_per_sample; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Image depth: %.20g", (double) image->depth); image->endian=MSBEndian; if (endian == FILLORDER_LSB2MSB) image->endian=LSBEndian; #if 
defined(MAGICKCORE_HAVE_TIFFISBIGENDIAN) if (TIFFIsBigEndian(tiff) == 0) { (void) SetImageProperty(image,"tiff:endian","lsb"); image->endian=LSBEndian; } else { (void) SetImageProperty(image,"tiff:endian","msb"); image->endian=MSBEndian; } #endif if ((photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) SetImageColorspace(image,GRAYColorspace); if (photometric == PHOTOMETRIC_SEPARATED) SetImageColorspace(image,CMYKColorspace); if (photometric == PHOTOMETRIC_CIELAB) SetImageColorspace(image,LabColorspace); TIFFGetProfiles(tiff,image,image_info->ping); TIFFGetProperties(tiff,image); option=GetImageOption(image_info,"tiff:exif-properties"); if ((option == (const char *) NULL) || (IsMagickTrue(option) != MagickFalse)) TIFFGetEXIFProperties(tiff,image); if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XRESOLUTION,&x_resolution) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YRESOLUTION,&y_resolution) == 1)) { image->x_resolution=x_resolution; image->y_resolution=y_resolution; } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_RESOLUTIONUNIT,&units) == 1) { if (units == RESUNIT_INCH) image->units=PixelsPerInchResolution; if (units == RESUNIT_CENTIMETER) image->units=PixelsPerCentimeterResolution; } if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XPOSITION,&x_position) == 1) && (TIFFGetFieldDefaulted(tiff,TIFFTAG_YPOSITION,&y_position) == 1)) { image->page.x=(ssize_t) ceil(x_position*image->x_resolution-0.5); image->page.y=(ssize_t) ceil(y_position*image->y_resolution-0.5); } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_ORIENTATION,&orientation) == 1) image->orientation=(OrientationType) orientation; if (TIFFGetField(tiff,TIFFTAG_WHITEPOINT,&chromaticity) == 1) { if (chromaticity != (float *) NULL) { image->chromaticity.white_point.x=chromaticity[0]; image->chromaticity.white_point.y=chromaticity[1]; } } if (TIFFGetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,&chromaticity) == 1) { if (chromaticity != (float *) NULL) { image->chromaticity.red_primary.x=chromaticity[0]; image->chromaticity.red_primary.y=chromaticity[1]; image->chromaticity.green_primary.x=chromaticity[2]; image->chromaticity.green_primary.y=chromaticity[3]; image->chromaticity.blue_primary.x=chromaticity[4]; image->chromaticity.blue_primary.y=chromaticity[5]; } } #if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919) if ((compress_tag != COMPRESSION_NONE) && (TIFFIsCODECConfigured(compress_tag) == 0)) { TIFFClose(tiff); ThrowReaderException(CoderError,"CompressNotSupported"); } #endif switch (compress_tag) { case COMPRESSION_NONE: image->compression=NoCompression; break; case COMPRESSION_CCITTFAX3: image->compression=FaxCompression; break; case COMPRESSION_CCITTFAX4: image->compression=Group4Compression; break; case COMPRESSION_JPEG: { image->compression=JPEGCompression; #if defined(JPEG_SUPPORT) { char sampling_factor[MaxTextExtent]; int tiff_status; uint16 horizontal, vertical; tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_YCBCRSUBSAMPLING, &horizontal,&vertical); if (tiff_status == 1) { (void) FormatLocaleString(sampling_factor,MaxTextExtent,"%dx%d", horizontal,vertical); (void) SetImageProperty(image,"jpeg:sampling-factor", sampling_factor); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling Factors: %s",sampling_factor); } } #endif break; } case COMPRESSION_OJPEG: image->compression=JPEGCompression; break; #if defined(COMPRESSION_LZMA) case COMPRESSION_LZMA: image->compression=LZMACompression; break; #endif case COMPRESSION_LZW: image->compression=LZWCompression; break; case COMPRESSION_DEFLATE: 
image->compression=ZipCompression; break; case COMPRESSION_ADOBE_DEFLATE: image->compression=ZipCompression; break; default: image->compression=RLECompression; break; } /* Allocate memory for the image and pixel buffer. */ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } if (sample_format == SAMPLEFORMAT_UINT) status=SetQuantumFormat(image,quantum_info,UnsignedQuantumFormat); if (sample_format == SAMPLEFORMAT_INT) status=SetQuantumFormat(image,quantum_info,SignedQuantumFormat); if (sample_format == SAMPLEFORMAT_IEEEFP) status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat); if (status == MagickFalse) { TIFFClose(tiff); quantum_info=DestroyQuantumInfo(quantum_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } status=MagickTrue; switch (photometric) { case PHOTOMETRIC_MINISBLACK: { quantum_info->min_is_white=MagickFalse; break; } case PHOTOMETRIC_MINISWHITE: { quantum_info->min_is_white=MagickTrue; break; } default: break; } tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_EXTRASAMPLES,&extra_samples, &sample_info); if (tiff_status == 1) { (void) SetImageProperty(image,"tiff:alpha","unspecified"); if (extra_samples == 0) { if ((samples_per_pixel == 4) && (photometric == PHOTOMETRIC_RGB)) image->matte=MagickTrue; } else for (i=0; i < extra_samples; i++) { image->matte=MagickTrue; if (sample_info[i] == EXTRASAMPLE_ASSOCALPHA) { SetQuantumAlphaType(quantum_info,DisassociatedQuantumAlpha); (void) SetImageProperty(image,"tiff:alpha","associated"); } else if (sample_info[i] == EXTRASAMPLE_UNASSALPHA) (void) SetImageProperty(image,"tiff:alpha","unassociated"); } } if ((photometric == PHOTOMETRIC_PALETTE) && (pow(2.0,1.0*bits_per_sample) <= MaxColormapSize)) { size_t colors; colors=(size_t) GetQuantumRange(bits_per_sample)+1; if (AcquireImageColormap(image,colors) == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } } if (TIFFGetFieldDefaulted(tiff,TIFFTAG_PAGENUMBER,&value,&pages) == 1) image->scene=value; if (image->storage_class == PseudoClass) { int tiff_status; size_t range; uint16 *blue_colormap, *green_colormap, *red_colormap; /* Initialize colormap. 
*/ tiff_status=TIFFGetField(tiff,TIFFTAG_COLORMAP,&red_colormap, &green_colormap,&blue_colormap); if (tiff_status == 1) { if ((red_colormap != (uint16 *) NULL) && (green_colormap != (uint16 *) NULL) && (blue_colormap != (uint16 *) NULL)) { range=255; /* might be old style 8-bit colormap */ for (i=0; i < (ssize_t) image->colors; i++) if ((red_colormap[i] >= 256) || (green_colormap[i] >= 256) || (blue_colormap[i] >= 256)) { range=65535; break; } for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ClampToQuantum(((double) QuantumRange*red_colormap[i])/range); image->colormap[i].green=ClampToQuantum(((double) QuantumRange*green_colormap[i])/range); image->colormap[i].blue=ClampToQuantum(((double) QuantumRange*blue_colormap[i])/range); } } } if (image->matte == MagickFalse) image->depth=GetImageDepth(image,exception); } if (image_info->ping != MagickFalse) { if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) { quantum_info=DestroyQuantumInfo(quantum_info); break; } goto next_tiff_frame; } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } method=ReadGenericMethod; if (TIFFGetField(tiff,TIFFTAG_ROWSPERSTRIP,&rows_per_strip) == 1) { char value[MaxTextExtent]; method=ReadStripMethod; (void) FormatLocaleString(value,MaxTextExtent,"%u",(unsigned int) rows_per_strip); (void) SetImageProperty(image,"tiff:rows-per-strip",value); } if ((samples_per_pixel >= 2) && (interlace == PLANARCONFIG_CONTIG)) method=ReadRGBAMethod; if ((samples_per_pixel >= 2) && (interlace == PLANARCONFIG_SEPARATE)) method=ReadCMYKAMethod; if ((photometric != PHOTOMETRIC_RGB) && (photometric != PHOTOMETRIC_CIELAB) && (photometric != PHOTOMETRIC_SEPARATED)) method=ReadGenericMethod; if (image->storage_class == PseudoClass) method=ReadSingleSampleMethod; if ((photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) method=ReadSingleSampleMethod; if ((photometric != PHOTOMETRIC_SEPARATED) && (interlace == PLANARCONFIG_SEPARATE) && (bits_per_sample < 64)) method=ReadGenericMethod; if (image->compression == JPEGCompression) method=GetJPEGMethod(image,tiff,photometric,bits_per_sample, samples_per_pixel); if (compress_tag == COMPRESSION_JBIG) method=ReadStripMethod; if (TIFFIsTiled(tiff) != MagickFalse) method=ReadTileMethod; quantum_info->endian=LSBEndian; quantum_type=RGBQuantum; pixels=GetQuantumPixels(quantum_info); switch (method) { case ReadSingleSampleMethod: { /* Convert TIFF image to PseudoClass MIFF image. */ quantum_type=IndexQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-1,0); if (image->matte != MagickFalse) { if (image->storage_class != PseudoClass) { quantum_type=samples_per_pixel == 1 ? 
AlphaQuantum : GrayAlphaQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-2,0); } else { quantum_type=IndexAlphaQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-2,0); } } else if (image->storage_class != PseudoClass) { quantum_type=GrayQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-1,0); } status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >> 3)); if (status == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { int status; register PixelPacket *magick_restrict q; status=TIFFReadPixels(tiff,bits_per_sample,0,y,(char *) pixels); if (status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadRGBAMethod: { /* Convert TIFF image to DirectClass MIFF image. */ pad=(size_t) MagickMax((size_t) samples_per_pixel-3,0); quantum_type=RGBQuantum; if (image->matte != MagickFalse) { quantum_type=RGBAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); } if (image->colorspace == CMYKColorspace) { pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0); quantum_type=CMYKQuantum; if (image->matte != MagickFalse) { quantum_type=CMYKAQuantum; pad=(size_t) MagickMax((size_t) samples_per_pixel-5,0); } } status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >> 3)); if (status == MagickFalse) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { int status; register PixelPacket *magick_restrict q; status=TIFFReadPixels(tiff,bits_per_sample,0,y,(char *) pixels); if (status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadCMYKAMethod: { /* Convert TIFF image to DirectClass MIFF image. 
*/ for (i=0; i < (ssize_t) samples_per_pixel; i++) { for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; int status; status=TIFFReadPixels(tiff,bits_per_sample,(tsample_t) i,y,(char *) pixels); if (status == -1) break; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; if (image->colorspace != CMYKColorspace) switch (i) { case 0: quantum_type=RedQuantum; break; case 1: quantum_type=GreenQuantum; break; case 2: quantum_type=BlueQuantum; break; case 3: quantum_type=AlphaQuantum; break; default: quantum_type=UndefinedQuantum; break; } else switch (i) { case 0: quantum_type=CyanQuantum; break; case 1: quantum_type=MagentaQuantum; break; case 2: quantum_type=YellowQuantum; break; case 3: quantum_type=BlackQuantum; break; case 4: quantum_type=AlphaQuantum; break; default: quantum_type=UndefinedQuantum; break; } (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadYCCKMethod: { pixels=GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { int status; register IndexPacket *indexes; register PixelPacket *magick_restrict q; register ssize_t x; unsigned char *p; status=TIFFReadPixels(tiff,bits_per_sample,0,y,(char *) pixels); if (status == -1) break; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); p=pixels; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,ScaleCharToQuantum(ClampYCC((double) *p+ (1.402*(double) *(p+2))-179.456))); SetPixelMagenta(q,ScaleCharToQuantum(ClampYCC((double) *p- (0.34414*(double) *(p+1))-(0.71414*(double ) *(p+2))+ 135.45984))); SetPixelYellow(q,ScaleCharToQuantum(ClampYCC((double) *p+ (1.772*(double) *(p+1))-226.816))); SetPixelBlack(indexes+x,ScaleCharToQuantum((unsigned char)*(p+3))); q++; p+=4; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadStripMethod: { register uint32 *p; /* Convert stripped TIFF image to DirectClass MIFF image. 
*/ i=0; p=(uint32 *) NULL; for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; if (i == 0) { if (TIFFReadRGBAStrip(tiff,(tstrip_t) y,(uint32 *) pixels) == 0) break; i=(ssize_t) MagickMin((ssize_t) rows_per_strip,(ssize_t) image->rows-y); } i--; p=((uint32 *) pixels)+image->columns*i; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) (TIFFGetR(*p)))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) (TIFFGetG(*p)))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) (TIFFGetB(*p)))); if (image->matte != MagickFalse) SetPixelOpacity(q,ScaleCharToQuantum((unsigned char) (TIFFGetA(*p)))); p++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } break; } case ReadTileMethod: { register uint32 *p; uint32 *tile_pixels, columns, rows; /* Convert tiled TIFF image to DirectClass MIFF image. */ if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) || (TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1)) { TIFFClose(tiff); ThrowReaderException(CoderError,"ImageIsNotTiled"); } (void) SetImageStorageClass(image,DirectClass); number_pixels=(MagickSizeType) columns*rows; if ((number_pixels*sizeof(uint32)) != (MagickSizeType) ((size_t) (number_pixels*sizeof(uint32)))) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } tile_pixels=(uint32 *) AcquireQuantumMemory(number_pixels, sizeof(*tile_pixels)); if (tile_pixels == (uint32 *) NULL) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } for (y=0; y < (ssize_t) image->rows; y+=rows) { PixelPacket *tile; register ssize_t x; register PixelPacket *magick_restrict q; size_t columns_remaining, rows_remaining; rows_remaining=image->rows-y; if ((ssize_t) (y+rows) < (ssize_t) image->rows) rows_remaining=rows; tile=QueueAuthenticPixels(image,0,y,image->columns,rows_remaining, exception); if (tile == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x+=columns) { size_t column, row; if (TIFFReadRGBATile(tiff,(uint32) x,(uint32) y,tile_pixels) == 0) break; columns_remaining=image->columns-x; if ((ssize_t) (x+columns) < (ssize_t) image->columns) columns_remaining=columns; p=tile_pixels+(rows-rows_remaining)*columns; q=tile+(image->columns*(rows_remaining-1)+x); for (row=rows_remaining; row > 0; row--) { if (image->matte != MagickFalse) for (column=columns_remaining; column > 0; column--) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) TIFFGetR(*p))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) TIFFGetG(*p))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) TIFFGetB(*p))); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) TIFFGetA(*p))); q++; p++; } else for (column=columns_remaining; column > 0; column--) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) TIFFGetR(*p))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) TIFFGetG(*p))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) TIFFGetB(*p))); q++; p++; } p+=columns-columns_remaining; q-=(image->columns+columns_remaining); } } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) 
break; } } tile_pixels=(uint32 *) RelinquishMagickMemory(tile_pixels); break; } case ReadGenericMethod: default: { MemoryInfo *pixel_info; register uint32 *p; uint32 *pixels; /* Convert TIFF image to DirectClass MIFF image. */ number_pixels=(MagickSizeType) image->columns*image->rows; if ((number_pixels*sizeof(uint32)) != (MagickSizeType) ((size_t) (number_pixels*sizeof(uint32)))) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixel_info=AcquireVirtualMemory(image->columns,image->rows* sizeof(uint32)); if (pixel_info == (MemoryInfo *) NULL) { TIFFClose(tiff); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(uint32 *) GetVirtualMemoryBlob(pixel_info); (void) TIFFReadRGBAImage(tiff,(uint32) image->columns,(uint32) image->rows,(uint32 *) pixels,0); /* Convert image to DirectClass pixel packets. */ p=pixels+number_pixels-1; for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; q+=image->columns-1; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) TIFFGetR(*p))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) TIFFGetG(*p))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) TIFFGetB(*p))); if (image->matte != MagickFalse) SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) TIFFGetA(*p))); p--; q--; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } pixel_info=RelinquishVirtualMemory(pixel_info); break; } } SetQuantumImageType(image,quantum_type); next_tiff_frame: quantum_info=DestroyQuantumInfo(quantum_info); if (photometric == PHOTOMETRIC_CIELAB) DecodeLabImage(image,exception); if ((photometric == PHOTOMETRIC_LOGL) || (photometric == PHOTOMETRIC_MINISBLACK) || (photometric == PHOTOMETRIC_MINISWHITE)) { image->type=GrayscaleType; if (bits_per_sample == 1) image->type=BilevelType; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse; if (status != MagickFalse) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,image->scene-1, image->scene); if (status == MagickFalse) break; } } while (status != MagickFalse); TIFFClose(tiff); TIFFReadPhotoshopLayers(image,image_info,exception); if (image_info->number_scenes != 0) { if (image_info->scene >= GetImageListLength(image)) { /* Subimage was not found in the Photoshop layer */ image = DestroyImageList(image); return((Image *)NULL); } } return(GetFirstImageInList(image)); }
168,626
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool DebugOnStart::FindArgument(wchar_t* command_line, const char* argument_c) { wchar_t argument[50]; for (int i = 0; argument_c[i]; ++i) argument[i] = argument_c[i]; int argument_len = lstrlen(argument); int command_line_len = lstrlen(command_line); while (command_line_len > argument_len) { wchar_t first_char = command_line[0]; wchar_t last_char = command_line[argument_len+1]; if ((first_char == L'-' || first_char == L'/') && (last_char == L' ' || last_char == 0 || last_char == L'=')) { command_line[argument_len+1] = 0; if (lstrcmpi(command_line+1, argument) == 0) { command_line[argument_len+1] = last_char; return true; } command_line[argument_len+1] = last_char; } ++command_line; --command_line_len; } return false; } Commit Message: Fix null-termination on string copy in debug-on-start code. BUG=73740 Review URL: http://codereview.chromium.org/6549019 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@75629 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
bool DebugOnStart::FindArgument(wchar_t* command_line, const char* argument_c) { wchar_t argument[50] = {}; for (int i = 0; argument_c[i]; ++i) argument[i] = argument_c[i]; int argument_len = lstrlen(argument); int command_line_len = lstrlen(command_line); while (command_line_len > argument_len) { wchar_t first_char = command_line[0]; wchar_t last_char = command_line[argument_len+1]; if ((first_char == L'-' || first_char == L'/') && (last_char == L' ' || last_char == 0 || last_char == L'=')) { command_line[argument_len+1] = 0; if (lstrcmpi(command_line+1, argument) == 0) { command_line[argument_len+1] = last_char; return true; } command_line[argument_len+1] = last_char; } ++command_line; --command_line_len; } return false; }
170,650
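A minimal sketch of the initialization idiom behind the FindArgument fix above, using a hypothetical ContainsSwitch helper (the name, the 50-element size, and the plain substring match are assumptions for illustration, not Chromium code): zero-initializing the wide buffer guarantees the widened copy stays null-terminated even though the copy loop never writes a terminator.

#include <cwchar>

// Hedged sketch: checks whether a narrow switch name appears in a wide command line.
bool ContainsSwitch(const wchar_t* command_line, const char* switch_ascii) {
    wchar_t widened[50] = {};  // every element starts as L'\0', so the copy below is always terminated
    for (int i = 0; i < 49 && switch_ascii[i] != '\0'; ++i)
        widened[i] = static_cast<wchar_t>(switch_ascii[i]);
    return std::wcsstr(command_line, widened) != nullptr;  // simple substring check for the sketch
}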
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ResourceRequestInfoImpl* ResourceDispatcherHostImpl::CreateRequestInfo( int child_id, int render_view_route_id, int render_frame_route_id, PreviewsState previews_state, bool download, ResourceContext* context) { return new ResourceRequestInfoImpl( ResourceRequesterInfo::CreateForDownloadOrPageSave(child_id), render_view_route_id, -1, // frame_tree_node_id ChildProcessHost::kInvalidUniqueID, // plugin_child_id MakeRequestID(), render_frame_route_id, false, // is_main_frame {}, // fetch_window_id RESOURCE_TYPE_SUB_RESOURCE, ui::PAGE_TRANSITION_LINK, download, // is_download false, // is_stream download, // allow_download false, // has_user_gesture false, // enable_load_timing false, // enable_upload_progress false, // do_not_prompt_for_login false, // keepalive network::mojom::ReferrerPolicy::kDefault, false, // is_prerendering context, false, // report_raw_headers false, // report_security_info true, // is_async previews_state, // previews_state nullptr, // body false); // initiated_in_secure_context } Commit Message: When turning a download into a navigation, navigate the right frame Code changes from Nate Chapin <[email protected]> Bug: 926105 Change-Id: I098599394e6ebe7d2fce5af838014297a337d294 Reviewed-on: https://chromium-review.googlesource.com/c/1454962 Reviewed-by: Camille Lamy <[email protected]> Commit-Queue: Jochen Eisinger <[email protected]> Cr-Commit-Position: refs/heads/master@{#629547} CWE ID: CWE-284
ResourceRequestInfoImpl* ResourceDispatcherHostImpl::CreateRequestInfo( int child_id, int render_view_route_id, int render_frame_route_id, int frame_tree_node_id, PreviewsState previews_state, bool download, ResourceContext* context) { return new ResourceRequestInfoImpl( ResourceRequesterInfo::CreateForDownloadOrPageSave(child_id), render_view_route_id, frame_tree_node_id, ChildProcessHost::kInvalidUniqueID, // plugin_child_id MakeRequestID(), render_frame_route_id, false, // is_main_frame {}, // fetch_window_id RESOURCE_TYPE_SUB_RESOURCE, ui::PAGE_TRANSITION_LINK, download, // is_download false, // is_stream download, // allow_download false, // has_user_gesture false, // enable_load_timing false, // enable_upload_progress false, // do_not_prompt_for_login false, // keepalive network::mojom::ReferrerPolicy::kDefault, false, // is_prerendering context, false, // report_raw_headers false, // report_security_info true, // is_async previews_state, // previews_state nullptr, // body false); // initiated_in_secure_context }
173,026
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: gss_context_time (minor_status, context_handle, time_rec) OM_uint32 * minor_status; gss_ctx_id_t context_handle; OM_uint32 * time_rec; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; if (minor_status == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); *minor_status = 0; if (time_rec == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); if (context_handle == GSS_C_NO_CONTEXT) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_context_time) { status = mech->gss_context_time( minor_status, ctx->internal_ctx_id, time_rec); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); } Commit Message: Preserve GSS context on init/accept failure After gss_init_sec_context() or gss_accept_sec_context() has created a context, don't delete the mechglue context on failures from subsequent calls, even if the mechanism deletes the mech-specific context (which is allowed by RFC 2744 but not preferred). Check for union contexts with no mechanism context in each GSS function which accepts a gss_ctx_id_t. CVE-2017-11462: RFC 2744 permits a GSS-API implementation to delete an existing security context on a second or subsequent call to gss_init_sec_context() or gss_accept_sec_context() if the call results in an error. This API behavior has been found to be dangerous, leading to the possibility of memory errors in some callers. For safety, GSS-API implementations should instead preserve existing security contexts on error until the caller deletes them. All versions of MIT krb5 prior to this change may delete acceptor contexts on error. Versions 1.13.4 through 1.13.7, 1.14.1 through 1.14.5, and 1.15 through 1.15.1 may also delete initiator contexts on error. ticket: 8598 (new) target_version: 1.15-next target_version: 1.14-next tags: pullup CWE ID: CWE-415
gss_context_time (minor_status, context_handle, time_rec) OM_uint32 * minor_status; gss_ctx_id_t context_handle; OM_uint32 * time_rec; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; if (minor_status == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); *minor_status = 0; if (time_rec == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); if (context_handle == GSS_C_NO_CONTEXT) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_context_time) { status = mech->gss_context_time( minor_status, ctx->internal_ctx_id, time_rec); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
168,013
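The gss_context_time fix above follows a general defensive pattern: every entry point re-validates that the mechanism-level context still exists before dispatching to it, because a failed init/accept may already have torn it down. A stripped-down sketch of that guard, with the union_ctx shape, constants, and function name invented for illustration:

#include <cstdint>

struct union_ctx {
    void* internal_ctx;  // mechanism-specific handle; may be null after a failed handshake
};

constexpr std::uint32_t S_COMPLETE   = 0;
constexpr std::uint32_t S_NO_CONTEXT = 1;

std::uint32_t context_lifetime(union_ctx* ctx, std::uint32_t* seconds_remaining) {
    if (ctx == nullptr || ctx->internal_ctx == nullptr)
        return S_NO_CONTEXT;   // refuse to dispatch rather than dereference a dead handle
    *seconds_remaining = 3600; // stand-in for the real mechanism-level call
    return S_COMPLETE;
}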
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Image *ReadEMFImage(const ImageInfo *image_info, ExceptionInfo *exception) { BITMAPINFO DIBinfo; HBITMAP hBitmap, hOldBitmap; HDC hDC; HENHMETAFILE hemf; Image *image; RECT rect; register ssize_t x; register PixelPacket *q; RGBQUAD *pBits, *ppBits; ssize_t height, width, y; image=AcquireImage(image_info); hemf=ReadEnhMetaFile(image_info->filename,&width,&height); if (hemf == (HENHMETAFILE) NULL) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if ((image->columns == 0) || (image->rows == 0)) { double y_resolution, x_resolution; y_resolution=DefaultResolution; x_resolution=DefaultResolution; if (image->y_resolution > 0) { y_resolution=image->y_resolution; if (image->units == PixelsPerCentimeterResolution) y_resolution*=CENTIMETERS_INCH; } if (image->x_resolution > 0) { x_resolution=image->x_resolution; if (image->units == PixelsPerCentimeterResolution) x_resolution*=CENTIMETERS_INCH; } image->rows=(size_t) ((height/1000.0/CENTIMETERS_INCH)*y_resolution+0.5); image->columns=(size_t) ((width/1000.0/CENTIMETERS_INCH)* x_resolution+0.5); } if (image_info->size != (char *) NULL) { ssize_t x; image->columns=width; image->rows=height; x=0; y=0; (void) GetGeometry(image_info->size,&x,&y,&image->columns,&image->rows); } if (image_info->page != (char *) NULL) { char *geometry; register char *p; MagickStatusType flags; ssize_t sans; geometry=GetPageGeometry(image_info->page); p=strchr(geometry,'>'); if (p == (char *) NULL) { flags=ParseMetaGeometry(geometry,&sans,&sans,&image->columns, &image->rows); if (image->x_resolution != 0.0) image->columns=(size_t) floor((image->columns*image->x_resolution)+ 0.5); if (image->y_resolution != 0.0) image->rows=(size_t) floor((image->rows*image->y_resolution)+0.5); } else { *p='\0'; flags=ParseMetaGeometry(geometry,&sans,&sans,&image->columns, &image->rows); if (image->x_resolution != 0.0) image->columns=(size_t) floor(((image->columns*image->x_resolution)/ DefaultResolution)+0.5); if (image->y_resolution != 0.0) image->rows=(size_t) floor(((image->rows*image->y_resolution)/ DefaultResolution)+0.5); } (void) flags; geometry=DestroyString(geometry); } hDC=GetDC(NULL); if (hDC == (HDC) NULL) { DeleteEnhMetaFile(hemf); ThrowReaderException(ResourceLimitError,"UnableToCreateADC"); } /* Initialize the bitmap header info. */ (void) ResetMagickMemory(&DIBinfo,0,sizeof(BITMAPINFO)); DIBinfo.bmiHeader.biSize=sizeof(BITMAPINFOHEADER); DIBinfo.bmiHeader.biWidth=(LONG) image->columns; DIBinfo.bmiHeader.biHeight=(-1)*(LONG) image->rows; DIBinfo.bmiHeader.biPlanes=1; DIBinfo.bmiHeader.biBitCount=32; DIBinfo.bmiHeader.biCompression=BI_RGB; hBitmap=CreateDIBSection(hDC,&DIBinfo,DIB_RGB_COLORS,(void **) &ppBits,NULL, 0); ReleaseDC(NULL,hDC); if (hBitmap == (HBITMAP) NULL) { DeleteEnhMetaFile(hemf); ThrowReaderException(ResourceLimitError,"UnableToCreateBitmap"); } hDC=CreateCompatibleDC(NULL); if (hDC == (HDC) NULL) { DeleteEnhMetaFile(hemf); DeleteObject(hBitmap); ThrowReaderException(ResourceLimitError,"UnableToCreateADC"); } hOldBitmap=(HBITMAP) SelectObject(hDC,hBitmap); if (hOldBitmap == (HBITMAP) NULL) { DeleteEnhMetaFile(hemf); DeleteDC(hDC); DeleteObject(hBitmap); ThrowReaderException(ResourceLimitError,"UnableToCreateBitmap"); } /* Initialize the bitmap to the image background color. 
*/ pBits=ppBits; for (y=0; y < (ssize_t) image->rows; y++) { for (x=0; x < (ssize_t) image->columns; x++) { pBits->rgbRed=ScaleQuantumToChar(image->background_color.red); pBits->rgbGreen=ScaleQuantumToChar(image->background_color.green); pBits->rgbBlue=ScaleQuantumToChar(image->background_color.blue); pBits++; } } rect.top=0; rect.left=0; rect.right=(LONG) image->columns; rect.bottom=(LONG) image->rows; /* Convert metafile pixels. */ PlayEnhMetaFile(hDC,hemf,&rect); pBits=ppBits; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum(pBits->rgbRed)); SetPixelGreen(q,ScaleCharToQuantum(pBits->rgbGreen)); SetPixelBlue(q,ScaleCharToQuantum(pBits->rgbBlue)); SetPixelOpacity(q,OpaqueOpacity); pBits++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } DeleteEnhMetaFile(hemf); SelectObject(hDC,hOldBitmap); DeleteDC(hDC); DeleteObject(hBitmap); return(GetFirstImageInList(image)); } Commit Message: CWE ID: CWE-119
static Image *ReadEMFImage(const ImageInfo *image_info, ExceptionInfo *exception) { BITMAPINFO DIBinfo; HBITMAP hBitmap, hOldBitmap; HDC hDC; HENHMETAFILE hemf; Image *image; RECT rect; register ssize_t x; register PixelPacket *q; RGBQUAD *pBits, *ppBits; ssize_t height, width, y; image=AcquireImage(image_info); hemf=ReadEnhMetaFile(image_info->filename,&width,&height); if (hemf == (HENHMETAFILE) NULL) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if ((image->columns == 0) || (image->rows == 0)) { double y_resolution, x_resolution; y_resolution=DefaultResolution; x_resolution=DefaultResolution; if (image->y_resolution > 0) { y_resolution=image->y_resolution; if (image->units == PixelsPerCentimeterResolution) y_resolution*=CENTIMETERS_INCH; } if (image->x_resolution > 0) { x_resolution=image->x_resolution; if (image->units == PixelsPerCentimeterResolution) x_resolution*=CENTIMETERS_INCH; } image->rows=(size_t) ((height/1000.0/CENTIMETERS_INCH)*y_resolution+0.5); image->columns=(size_t) ((width/1000.0/CENTIMETERS_INCH)* x_resolution+0.5); } if (image_info->size != (char *) NULL) { ssize_t x; image->columns=width; image->rows=height; x=0; y=0; (void) GetGeometry(image_info->size,&x,&y,&image->columns,&image->rows); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if (image_info->page != (char *) NULL) { char *geometry; register char *p; MagickStatusType flags; ssize_t sans; geometry=GetPageGeometry(image_info->page); p=strchr(geometry,'>'); if (p == (char *) NULL) { flags=ParseMetaGeometry(geometry,&sans,&sans,&image->columns, &image->rows); if (image->x_resolution != 0.0) image->columns=(size_t) floor((image->columns*image->x_resolution)+ 0.5); if (image->y_resolution != 0.0) image->rows=(size_t) floor((image->rows*image->y_resolution)+0.5); } else { *p='\0'; flags=ParseMetaGeometry(geometry,&sans,&sans,&image->columns, &image->rows); if (image->x_resolution != 0.0) image->columns=(size_t) floor(((image->columns*image->x_resolution)/ DefaultResolution)+0.5); if (image->y_resolution != 0.0) image->rows=(size_t) floor(((image->rows*image->y_resolution)/ DefaultResolution)+0.5); } (void) flags; geometry=DestroyString(geometry); } hDC=GetDC(NULL); if (hDC == (HDC) NULL) { DeleteEnhMetaFile(hemf); ThrowReaderException(ResourceLimitError,"UnableToCreateADC"); } /* Initialize the bitmap header info. */ (void) ResetMagickMemory(&DIBinfo,0,sizeof(BITMAPINFO)); DIBinfo.bmiHeader.biSize=sizeof(BITMAPINFOHEADER); DIBinfo.bmiHeader.biWidth=(LONG) image->columns; DIBinfo.bmiHeader.biHeight=(-1)*(LONG) image->rows; DIBinfo.bmiHeader.biPlanes=1; DIBinfo.bmiHeader.biBitCount=32; DIBinfo.bmiHeader.biCompression=BI_RGB; hBitmap=CreateDIBSection(hDC,&DIBinfo,DIB_RGB_COLORS,(void **) &ppBits,NULL, 0); ReleaseDC(NULL,hDC); if (hBitmap == (HBITMAP) NULL) { DeleteEnhMetaFile(hemf); ThrowReaderException(ResourceLimitError,"UnableToCreateBitmap"); } hDC=CreateCompatibleDC(NULL); if (hDC == (HDC) NULL) { DeleteEnhMetaFile(hemf); DeleteObject(hBitmap); ThrowReaderException(ResourceLimitError,"UnableToCreateADC"); } hOldBitmap=(HBITMAP) SelectObject(hDC,hBitmap); if (hOldBitmap == (HBITMAP) NULL) { DeleteEnhMetaFile(hemf); DeleteDC(hDC); DeleteObject(hBitmap); ThrowReaderException(ResourceLimitError,"UnableToCreateBitmap"); } /* Initialize the bitmap to the image background color. 
*/ pBits=ppBits; for (y=0; y < (ssize_t) image->rows; y++) { for (x=0; x < (ssize_t) image->columns; x++) { pBits->rgbRed=ScaleQuantumToChar(image->background_color.red); pBits->rgbGreen=ScaleQuantumToChar(image->background_color.green); pBits->rgbBlue=ScaleQuantumToChar(image->background_color.blue); pBits++; } } rect.top=0; rect.left=0; rect.right=(LONG) image->columns; rect.bottom=(LONG) image->rows; /* Convert metafile pixels. */ PlayEnhMetaFile(hDC,hemf,&rect); pBits=ppBits; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum(pBits->rgbRed)); SetPixelGreen(q,ScaleCharToQuantum(pBits->rgbGreen)); SetPixelBlue(q,ScaleCharToQuantum(pBits->rgbBlue)); SetPixelOpacity(q,OpaqueOpacity); pBits++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } DeleteEnhMetaFile(hemf); SelectObject(hDC,hOldBitmap); DeleteDC(hDC); DeleteObject(hBitmap); return(GetFirstImageInList(image)); }
168,562
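The ImageMagick fixes in this stretch all add a SetImageExtent call before any pixel processing; the underlying rule is to validate geometry before pixel memory is allocated or written. A self-contained sketch of that check, with the pixel cap chosen arbitrarily for the example:

#include <cstddef>
#include <stdexcept>
#include <vector>

constexpr std::size_t kMaxPixels = std::size_t{1} << 26;  // arbitrary cap for the sketch

std::vector<unsigned char> AllocateCanvas(std::size_t columns, std::size_t rows) {
    if (columns == 0 || rows == 0)
        throw std::invalid_argument("empty geometry");
    if (rows > kMaxPixels / columns)                       // overflow-safe area check
        throw std::length_error("geometry exceeds pixel cap");
    return std::vector<unsigned char>(columns * rows * 4); // 4 bytes per RGBA pixel
}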
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Image *ReadGRAYImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *canvas_image, *image; MagickBooleanType status; MagickOffsetType scene; QuantumInfo *quantum_info; QuantumType quantum_type; size_t length; ssize_t count, y; unsigned char *pixels; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); if ((image->columns == 0) || (image->rows == 0)) ThrowReaderException(OptionError,"MustSpecifyImageSize"); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } if (DiscardBlobBytes(image,(size_t) image->offset) == MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); /* Create virtual canvas to support cropping (i.e. image.gray[100x100+10+20]). */ SetImageColorspace(image,GRAYColorspace); canvas_image=CloneImage(image,image->extract_info.width,1,MagickFalse, exception); (void) SetImageVirtualPixelMethod(canvas_image,BlackVirtualPixelMethod); quantum_type=GrayQuantum; quantum_info=AcquireQuantumInfo(image_info,canvas_image); if (quantum_info == (QuantumInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixels=GetQuantumPixels(quantum_info); if (image_info->number_scenes != 0) while (image->scene < image_info->scene) { /* Skip to next image. */ image->scene++; length=GetQuantumExtent(canvas_image,quantum_info,quantum_type); for (y=0; y < (ssize_t) image->rows; y++) { count=ReadBlob(image,length,pixels); if (count != (ssize_t) length) break; } } scene=0; count=0; length=0; do { /* Read pixels to virtual canvas image then push to image. 
*/ if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; SetImageColorspace(image,GRAYColorspace); if (scene == 0) { length=GetQuantumExtent(canvas_image,quantum_info,quantum_type); count=ReadBlob(image,length,pixels); } for (y=0; y < (ssize_t) image->extract_info.height; y++) { register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; if (count != (ssize_t) length) { ThrowFileException(exception,CorruptImageError, "UnexpectedEndOfFile",image->filename); break; } q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,exception); if (q == (PixelPacket *) NULL) break; length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse) break; if (((y-image->extract_info.y) >= 0) && ((y-image->extract_info.y) < (ssize_t) image->rows)) { p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0, image->columns,1,exception); q=QueueAuthenticPixels(image,0,y-image->extract_info.y,image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); p++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } count=ReadBlob(image,length,pixels); } SetQuantumImageType(image,quantum_type); /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; if (count == (ssize_t) length) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } scene++; } while (count == (ssize_t) length); quantum_info=DestroyQuantumInfo(quantum_info); InheritException(&image->exception,&canvas_image->exception); canvas_image=DestroyImage(canvas_image); (void) CloseBlob(image); return(GetFirstImageInList(image)); } Commit Message: CWE ID: CWE-119
static Image *ReadGRAYImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *canvas_image, *image; MagickBooleanType status; MagickOffsetType scene; QuantumInfo *quantum_info; QuantumType quantum_type; size_t length; ssize_t count, y; unsigned char *pixels; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); if ((image->columns == 0) || (image->rows == 0)) ThrowReaderException(OptionError,"MustSpecifyImageSize"); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } if (DiscardBlobBytes(image,(size_t) image->offset) == MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); /* Create virtual canvas to support cropping (i.e. image.gray[100x100+10+20]). */ SetImageColorspace(image,GRAYColorspace); canvas_image=CloneImage(image,image->extract_info.width,1,MagickFalse, exception); (void) SetImageVirtualPixelMethod(canvas_image,BlackVirtualPixelMethod); quantum_type=GrayQuantum; quantum_info=AcquireQuantumInfo(image_info,canvas_image); if (quantum_info == (QuantumInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixels=GetQuantumPixels(quantum_info); if (image_info->number_scenes != 0) while (image->scene < image_info->scene) { /* Skip to next image. */ image->scene++; length=GetQuantumExtent(canvas_image,quantum_info,quantum_type); for (y=0; y < (ssize_t) image->rows; y++) { count=ReadBlob(image,length,pixels); if (count != (ssize_t) length) break; } } scene=0; count=0; length=0; do { /* Read pixels to virtual canvas image then push to image. 
*/ if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } SetImageColorspace(image,GRAYColorspace); if (scene == 0) { length=GetQuantumExtent(canvas_image,quantum_info,quantum_type); count=ReadBlob(image,length,pixels); } for (y=0; y < (ssize_t) image->extract_info.height; y++) { register const PixelPacket *restrict p; register ssize_t x; register PixelPacket *restrict q; if (count != (ssize_t) length) { ThrowFileException(exception,CorruptImageError, "UnexpectedEndOfFile",image->filename); break; } q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,exception); if (q == (PixelPacket *) NULL) break; length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse) break; if (((y-image->extract_info.y) >= 0) && ((y-image->extract_info.y) < (ssize_t) image->rows)) { p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0, image->columns,1,exception); q=QueueAuthenticPixels(image,0,y-image->extract_info.y,image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); p++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } count=ReadBlob(image,length,pixels); } SetQuantumImageType(image,quantum_type); /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; if (count == (ssize_t) length) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } scene++; } while (count == (ssize_t) length); quantum_info=DestroyQuantumInfo(quantum_info); InheritException(&image->exception,&canvas_image->exception); canvas_image=DestroyImage(canvas_image); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
168,568
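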
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ThreadableBlobRegistry::registerBlobURL(const KURL& url, PassOwnPtr<BlobData> blobData) { if (isMainThread()) blobRegistry().registerBlobURL(url, blobData); else { OwnPtr<BlobRegistryContext> context = adoptPtr(new BlobRegistryContext(url, blobData)); callOnMainThread(&registerBlobURLTask, context.leakPtr()); } } Commit Message: Remove BlobRegistry indirection since there is only one implementation. BUG= Review URL: https://chromiumcodereview.appspot.com/15851008 git-svn-id: svn://svn.chromium.org/blink/trunk@152746 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID:
void BlobRegistry::registerBlobURL(const KURL& url, PassOwnPtr<BlobData> blobData) { if (isMainThread()) { if (WebBlobRegistry* registry = blobRegistry()) { WebBlobData webBlobData(blobData); registry->registerBlobURL(url, webBlobData); } } else { OwnPtr<BlobRegistryContext> context = adoptPtr(new BlobRegistryContext(url, blobData)); callOnMainThread(&registerBlobURLTask, context.leakPtr()); } }
170,684
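The registerBlobURL pair above relies on a run-it-here-or-post-it pattern for main-thread-only state. The queue below is a generic stand-in sketched for illustration, not a Blink or Chromium API:

#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class MainTaskQueue {
 public:
    explicit MainTaskQueue(std::thread::id main_id) : main_id_(main_id) {}

    // Run immediately when already on the main thread, otherwise queue the task.
    void RunOrPost(std::function<void()> task) {
        if (std::this_thread::get_id() == main_id_) {
            task();
            return;
        }
        std::lock_guard<std::mutex> lock(mutex_);
        pending_.push(std::move(task));  // drained later by the main loop
    }

    // Called periodically from the main thread to execute queued tasks.
    void DrainOnMainThread() {
        std::queue<std::function<void()>> local;
        {
            std::lock_guard<std::mutex> lock(mutex_);
            local.swap(pending_);
        }
        while (!local.empty()) { local.front()(); local.pop(); }
    }

 private:
    std::thread::id main_id_;
    std::mutex mutex_;
    std::queue<std::function<void()>> pending_;
};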
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int hls_slice_header(HEVCContext *s) { GetBitContext *gb = &s->HEVClc->gb; SliceHeader *sh = &s->sh; int i, ret; sh->first_slice_in_pic_flag = get_bits1(gb); if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) { s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; if (IS_IDR(s)) ff_hevc_clear_refs(s); } sh->no_output_of_prior_pics_flag = 0; if (IS_IRAP(s)) sh->no_output_of_prior_pics_flag = get_bits1(gb); sh->pps_id = get_ue_golomb_long(gb); if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) { av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id); return AVERROR_INVALIDDATA; } if (!sh->first_slice_in_pic_flag && s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) { av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n"); return AVERROR_INVALIDDATA; } s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data; if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1) sh->no_output_of_prior_pics_flag = 1; if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) { const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data; const HEVCSPS *last_sps = s->ps.sps; enum AVPixelFormat pix_fmt; if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) { if (sps->width != last_sps->width || sps->height != last_sps->height || sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering != last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering) sh->no_output_of_prior_pics_flag = 0; } ff_hevc_clear_refs(s); ret = set_sps(s, sps, sps->pix_fmt); if (ret < 0) return ret; pix_fmt = get_format(s, sps); if (pix_fmt < 0) return pix_fmt; s->avctx->pix_fmt = pix_fmt; s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; } sh->dependent_slice_segment_flag = 0; if (!sh->first_slice_in_pic_flag) { int slice_address_length; if (s->ps.pps->dependent_slice_segments_enabled_flag) sh->dependent_slice_segment_flag = get_bits1(gb); slice_address_length = av_ceil_log2(s->ps.sps->ctb_width * s->ps.sps->ctb_height); sh->slice_segment_addr = get_bitsz(gb, slice_address_length); if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) { av_log(s->avctx, AV_LOG_ERROR, "Invalid slice segment address: %u.\n", sh->slice_segment_addr); return AVERROR_INVALIDDATA; } if (!sh->dependent_slice_segment_flag) { sh->slice_addr = sh->slice_segment_addr; s->slice_idx++; } } else { sh->slice_segment_addr = sh->slice_addr = 0; s->slice_idx = 0; s->slice_initialized = 0; } if (!sh->dependent_slice_segment_flag) { s->slice_initialized = 0; for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++) skip_bits(gb, 1); // slice_reserved_undetermined_flag[] sh->slice_type = get_ue_golomb_long(gb); if (!(sh->slice_type == HEVC_SLICE_I || sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B)) { av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n", sh->slice_type); return AVERROR_INVALIDDATA; } if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) { av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n"); return AVERROR_INVALIDDATA; } sh->pic_output_flag = 1; if (s->ps.pps->output_flag_present_flag) sh->pic_output_flag = get_bits1(gb); if (s->ps.sps->separate_colour_plane_flag) sh->colour_plane_id = get_bits(gb, 2); if (!IS_IDR(s)) { int poc, pos; sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb); poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type); if (!sh->first_slice_in_pic_flag && poc != s->poc) { 
av_log(s->avctx, AV_LOG_WARNING, "Ignoring POC change between slices: %d -> %d\n", s->poc, poc); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; poc = s->poc; } s->poc = poc; sh->short_term_ref_pic_set_sps_flag = get_bits1(gb); pos = get_bits_left(gb); if (!sh->short_term_ref_pic_set_sps_flag) { ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1); if (ret < 0) return ret; sh->short_term_rps = &sh->slice_rps; } else { int numbits, rps_idx; if (!s->ps.sps->nb_st_rps) { av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n"); return AVERROR_INVALIDDATA; } numbits = av_ceil_log2(s->ps.sps->nb_st_rps); rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0; sh->short_term_rps = &s->ps.sps->st_rps[rps_idx]; } sh->short_term_ref_pic_set_size = pos - get_bits_left(gb); pos = get_bits_left(gb); ret = decode_lt_rps(s, &sh->long_term_rps, gb); if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n"); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } sh->long_term_ref_pic_set_size = pos - get_bits_left(gb); if (s->ps.sps->sps_temporal_mvp_enabled_flag) sh->slice_temporal_mvp_enabled_flag = get_bits1(gb); else sh->slice_temporal_mvp_enabled_flag = 0; } else { s->sh.short_term_rps = NULL; s->poc = 0; } /* 8.3.1 */ if (sh->first_slice_in_pic_flag && s->temporal_id == 0 && s->nal_unit_type != HEVC_NAL_TRAIL_N && s->nal_unit_type != HEVC_NAL_TSA_N && s->nal_unit_type != HEVC_NAL_STSA_N && s->nal_unit_type != HEVC_NAL_RADL_N && s->nal_unit_type != HEVC_NAL_RADL_R && s->nal_unit_type != HEVC_NAL_RASL_N && s->nal_unit_type != HEVC_NAL_RASL_R) s->pocTid0 = s->poc; if (s->ps.sps->sao_enabled) { sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb); if (s->ps.sps->chroma_format_idc) { sh->slice_sample_adaptive_offset_flag[1] = sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb); } } else { sh->slice_sample_adaptive_offset_flag[0] = 0; sh->slice_sample_adaptive_offset_flag[1] = 0; sh->slice_sample_adaptive_offset_flag[2] = 0; } sh->nb_refs[L0] = sh->nb_refs[L1] = 0; if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) { int nb_refs; sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active; if (sh->slice_type == HEVC_SLICE_B) sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active; if (get_bits1(gb)) { // num_ref_idx_active_override_flag sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1; if (sh->slice_type == HEVC_SLICE_B) sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1; } if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) { av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n", sh->nb_refs[L0], sh->nb_refs[L1]); return AVERROR_INVALIDDATA; } sh->rpl_modification_flag[0] = 0; sh->rpl_modification_flag[1] = 0; nb_refs = ff_hevc_frame_nb_refs(s); if (!nb_refs) { av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n"); return AVERROR_INVALIDDATA; } if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) { sh->rpl_modification_flag[0] = get_bits1(gb); if (sh->rpl_modification_flag[0]) { for (i = 0; i < sh->nb_refs[L0]; i++) sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs)); } if (sh->slice_type == HEVC_SLICE_B) { sh->rpl_modification_flag[1] = get_bits1(gb); if (sh->rpl_modification_flag[1] == 1) for (i = 0; i < sh->nb_refs[L1]; i++) sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs)); } } if (sh->slice_type == HEVC_SLICE_B) sh->mvd_l1_zero_flag = get_bits1(gb); if (s->ps.pps->cabac_init_present_flag) sh->cabac_init_flag = 
get_bits1(gb); else sh->cabac_init_flag = 0; sh->collocated_ref_idx = 0; if (sh->slice_temporal_mvp_enabled_flag) { sh->collocated_list = L0; if (sh->slice_type == HEVC_SLICE_B) sh->collocated_list = !get_bits1(gb); if (sh->nb_refs[sh->collocated_list] > 1) { sh->collocated_ref_idx = get_ue_golomb_long(gb); if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) { av_log(s->avctx, AV_LOG_ERROR, "Invalid collocated_ref_idx: %d.\n", sh->collocated_ref_idx); return AVERROR_INVALIDDATA; } } } if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) || (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) { int ret = pred_weight_table(s, gb); if (ret < 0) return ret; } sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb); if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) { av_log(s->avctx, AV_LOG_ERROR, "Invalid number of merging MVP candidates: %d.\n", sh->max_num_merge_cand); return AVERROR_INVALIDDATA; } } sh->slice_qp_delta = get_se_golomb(gb); if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) { sh->slice_cb_qp_offset = get_se_golomb(gb); sh->slice_cr_qp_offset = get_se_golomb(gb); } else { sh->slice_cb_qp_offset = 0; sh->slice_cr_qp_offset = 0; } if (s->ps.pps->chroma_qp_offset_list_enabled_flag) sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb); else sh->cu_chroma_qp_offset_enabled_flag = 0; if (s->ps.pps->deblocking_filter_control_present_flag) { int deblocking_filter_override_flag = 0; if (s->ps.pps->deblocking_filter_override_enabled_flag) deblocking_filter_override_flag = get_bits1(gb); if (deblocking_filter_override_flag) { sh->disable_deblocking_filter_flag = get_bits1(gb); if (!sh->disable_deblocking_filter_flag) { int beta_offset_div2 = get_se_golomb(gb); int tc_offset_div2 = get_se_golomb(gb) ; if (beta_offset_div2 < -6 || beta_offset_div2 > 6 || tc_offset_div2 < -6 || tc_offset_div2 > 6) { av_log(s->avctx, AV_LOG_ERROR, "Invalid deblock filter offsets: %d, %d\n", beta_offset_div2, tc_offset_div2); return AVERROR_INVALIDDATA; } sh->beta_offset = beta_offset_div2 * 2; sh->tc_offset = tc_offset_div2 * 2; } } else { sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf; sh->beta_offset = s->ps.pps->beta_offset; sh->tc_offset = s->ps.pps->tc_offset; } } else { sh->disable_deblocking_filter_flag = 0; sh->beta_offset = 0; sh->tc_offset = 0; } if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag && (sh->slice_sample_adaptive_offset_flag[0] || sh->slice_sample_adaptive_offset_flag[1] || !sh->disable_deblocking_filter_flag)) { sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb); } else { sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag; } } else if (!s->slice_initialized) { av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n"); return AVERROR_INVALIDDATA; } sh->num_entry_point_offsets = 0; if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) { unsigned num_entry_point_offsets = get_ue_golomb_long(gb); if (num_entry_point_offsets > get_bits_left(gb)) { av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets); return AVERROR_INVALIDDATA; } sh->num_entry_point_offsets = num_entry_point_offsets; if (sh->num_entry_point_offsets > 0) { int offset_len = get_ue_golomb_long(gb) + 1; if (offset_len < 1 || offset_len > 32) { sh->num_entry_point_offsets = 0; av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len); return AVERROR_INVALIDDATA; } 
av_freep(&sh->entry_point_offset); av_freep(&sh->offset); av_freep(&sh->size); sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned)); sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int)); sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int)); if (!sh->entry_point_offset || !sh->offset || !sh->size) { sh->num_entry_point_offsets = 0; av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n"); return AVERROR(ENOMEM); } for (i = 0; i < sh->num_entry_point_offsets; i++) { unsigned val = get_bits_long(gb, offset_len); sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size } if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) { s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here s->threads_number = 1; } else s->enable_parallel_tiles = 0; } else s->enable_parallel_tiles = 0; } if (s->ps.pps->slice_header_extension_present_flag) { unsigned int length = get_ue_golomb_long(gb); if (length*8LL > get_bits_left(gb)) { av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n"); return AVERROR_INVALIDDATA; } for (i = 0; i < length; i++) skip_bits(gb, 8); // slice_header_extension_data_byte } sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta; if (sh->slice_qp > 51 || sh->slice_qp < -s->ps.sps->qp_bd_offset) { av_log(s->avctx, AV_LOG_ERROR, "The slice_qp %d is outside the valid range " "[%d, 51].\n", sh->slice_qp, -s->ps.sps->qp_bd_offset); return AVERROR_INVALIDDATA; } sh->slice_ctb_addr_rs = sh->slice_segment_addr; if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) { av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n"); return AVERROR_INVALIDDATA; } if (get_bits_left(gb) < 0) { av_log(s->avctx, AV_LOG_ERROR, "Overread slice header by %d bits\n", -get_bits_left(gb)); return AVERROR_INVALIDDATA; } s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag; if (!s->ps.pps->cu_qp_delta_enabled_flag) s->HEVClc->qp_y = s->sh.slice_qp; s->slice_initialized = 1; s->HEVClc->tu.cu_qp_offset_cb = 0; s->HEVClc->tu.cu_qp_offset_cr = 0; return 0; } Commit Message: avcodec/hevcdec: Avoid only partly skiping duplicate first slices Fixes: NULL pointer dereference and out of array access Fixes: 13871/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HEVC_fuzzer-5746167087890432 Fixes: 13845/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HEVC_fuzzer-5650370728034304 This also fixes the return code for explode mode Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg Reviewed-by: James Almer <[email protected]> Signed-off-by: Michael Niedermayer <[email protected]> CWE ID: CWE-476
static int hls_slice_header(HEVCContext *s) { GetBitContext *gb = &s->HEVClc->gb; SliceHeader *sh = &s->sh; int i, ret; sh->first_slice_in_pic_flag = get_bits1(gb); if (s->ref && sh->first_slice_in_pic_flag) { av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n"); return 1; // This slice will be skiped later, do not corrupt state } if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) { s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; if (IS_IDR(s)) ff_hevc_clear_refs(s); } sh->no_output_of_prior_pics_flag = 0; if (IS_IRAP(s)) sh->no_output_of_prior_pics_flag = get_bits1(gb); sh->pps_id = get_ue_golomb_long(gb); if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) { av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id); return AVERROR_INVALIDDATA; } if (!sh->first_slice_in_pic_flag && s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) { av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n"); return AVERROR_INVALIDDATA; } s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data; if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1) sh->no_output_of_prior_pics_flag = 1; if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) { const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data; const HEVCSPS *last_sps = s->ps.sps; enum AVPixelFormat pix_fmt; if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) { if (sps->width != last_sps->width || sps->height != last_sps->height || sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering != last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering) sh->no_output_of_prior_pics_flag = 0; } ff_hevc_clear_refs(s); ret = set_sps(s, sps, sps->pix_fmt); if (ret < 0) return ret; pix_fmt = get_format(s, sps); if (pix_fmt < 0) return pix_fmt; s->avctx->pix_fmt = pix_fmt; s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; } sh->dependent_slice_segment_flag = 0; if (!sh->first_slice_in_pic_flag) { int slice_address_length; if (s->ps.pps->dependent_slice_segments_enabled_flag) sh->dependent_slice_segment_flag = get_bits1(gb); slice_address_length = av_ceil_log2(s->ps.sps->ctb_width * s->ps.sps->ctb_height); sh->slice_segment_addr = get_bitsz(gb, slice_address_length); if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) { av_log(s->avctx, AV_LOG_ERROR, "Invalid slice segment address: %u.\n", sh->slice_segment_addr); return AVERROR_INVALIDDATA; } if (!sh->dependent_slice_segment_flag) { sh->slice_addr = sh->slice_segment_addr; s->slice_idx++; } } else { sh->slice_segment_addr = sh->slice_addr = 0; s->slice_idx = 0; s->slice_initialized = 0; } if (!sh->dependent_slice_segment_flag) { s->slice_initialized = 0; for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++) skip_bits(gb, 1); // slice_reserved_undetermined_flag[] sh->slice_type = get_ue_golomb_long(gb); if (!(sh->slice_type == HEVC_SLICE_I || sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B)) { av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n", sh->slice_type); return AVERROR_INVALIDDATA; } if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) { av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n"); return AVERROR_INVALIDDATA; } sh->pic_output_flag = 1; if (s->ps.pps->output_flag_present_flag) sh->pic_output_flag = get_bits1(gb); if (s->ps.sps->separate_colour_plane_flag) sh->colour_plane_id = get_bits(gb, 2); if (!IS_IDR(s)) { int poc, pos; 
sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb); poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type); if (!sh->first_slice_in_pic_flag && poc != s->poc) { av_log(s->avctx, AV_LOG_WARNING, "Ignoring POC change between slices: %d -> %d\n", s->poc, poc); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; poc = s->poc; } s->poc = poc; sh->short_term_ref_pic_set_sps_flag = get_bits1(gb); pos = get_bits_left(gb); if (!sh->short_term_ref_pic_set_sps_flag) { ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1); if (ret < 0) return ret; sh->short_term_rps = &sh->slice_rps; } else { int numbits, rps_idx; if (!s->ps.sps->nb_st_rps) { av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n"); return AVERROR_INVALIDDATA; } numbits = av_ceil_log2(s->ps.sps->nb_st_rps); rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0; sh->short_term_rps = &s->ps.sps->st_rps[rps_idx]; } sh->short_term_ref_pic_set_size = pos - get_bits_left(gb); pos = get_bits_left(gb); ret = decode_lt_rps(s, &sh->long_term_rps, gb); if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n"); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } sh->long_term_ref_pic_set_size = pos - get_bits_left(gb); if (s->ps.sps->sps_temporal_mvp_enabled_flag) sh->slice_temporal_mvp_enabled_flag = get_bits1(gb); else sh->slice_temporal_mvp_enabled_flag = 0; } else { s->sh.short_term_rps = NULL; s->poc = 0; } /* 8.3.1 */ if (sh->first_slice_in_pic_flag && s->temporal_id == 0 && s->nal_unit_type != HEVC_NAL_TRAIL_N && s->nal_unit_type != HEVC_NAL_TSA_N && s->nal_unit_type != HEVC_NAL_STSA_N && s->nal_unit_type != HEVC_NAL_RADL_N && s->nal_unit_type != HEVC_NAL_RADL_R && s->nal_unit_type != HEVC_NAL_RASL_N && s->nal_unit_type != HEVC_NAL_RASL_R) s->pocTid0 = s->poc; if (s->ps.sps->sao_enabled) { sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb); if (s->ps.sps->chroma_format_idc) { sh->slice_sample_adaptive_offset_flag[1] = sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb); } } else { sh->slice_sample_adaptive_offset_flag[0] = 0; sh->slice_sample_adaptive_offset_flag[1] = 0; sh->slice_sample_adaptive_offset_flag[2] = 0; } sh->nb_refs[L0] = sh->nb_refs[L1] = 0; if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) { int nb_refs; sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active; if (sh->slice_type == HEVC_SLICE_B) sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active; if (get_bits1(gb)) { // num_ref_idx_active_override_flag sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1; if (sh->slice_type == HEVC_SLICE_B) sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1; } if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) { av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n", sh->nb_refs[L0], sh->nb_refs[L1]); return AVERROR_INVALIDDATA; } sh->rpl_modification_flag[0] = 0; sh->rpl_modification_flag[1] = 0; nb_refs = ff_hevc_frame_nb_refs(s); if (!nb_refs) { av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n"); return AVERROR_INVALIDDATA; } if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) { sh->rpl_modification_flag[0] = get_bits1(gb); if (sh->rpl_modification_flag[0]) { for (i = 0; i < sh->nb_refs[L0]; i++) sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs)); } if (sh->slice_type == HEVC_SLICE_B) { sh->rpl_modification_flag[1] = get_bits1(gb); if (sh->rpl_modification_flag[1] == 1) for (i = 0; i < 
sh->nb_refs[L1]; i++) sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs)); } } if (sh->slice_type == HEVC_SLICE_B) sh->mvd_l1_zero_flag = get_bits1(gb); if (s->ps.pps->cabac_init_present_flag) sh->cabac_init_flag = get_bits1(gb); else sh->cabac_init_flag = 0; sh->collocated_ref_idx = 0; if (sh->slice_temporal_mvp_enabled_flag) { sh->collocated_list = L0; if (sh->slice_type == HEVC_SLICE_B) sh->collocated_list = !get_bits1(gb); if (sh->nb_refs[sh->collocated_list] > 1) { sh->collocated_ref_idx = get_ue_golomb_long(gb); if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) { av_log(s->avctx, AV_LOG_ERROR, "Invalid collocated_ref_idx: %d.\n", sh->collocated_ref_idx); return AVERROR_INVALIDDATA; } } } if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) || (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) { int ret = pred_weight_table(s, gb); if (ret < 0) return ret; } sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb); if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) { av_log(s->avctx, AV_LOG_ERROR, "Invalid number of merging MVP candidates: %d.\n", sh->max_num_merge_cand); return AVERROR_INVALIDDATA; } } sh->slice_qp_delta = get_se_golomb(gb); if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) { sh->slice_cb_qp_offset = get_se_golomb(gb); sh->slice_cr_qp_offset = get_se_golomb(gb); } else { sh->slice_cb_qp_offset = 0; sh->slice_cr_qp_offset = 0; } if (s->ps.pps->chroma_qp_offset_list_enabled_flag) sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb); else sh->cu_chroma_qp_offset_enabled_flag = 0; if (s->ps.pps->deblocking_filter_control_present_flag) { int deblocking_filter_override_flag = 0; if (s->ps.pps->deblocking_filter_override_enabled_flag) deblocking_filter_override_flag = get_bits1(gb); if (deblocking_filter_override_flag) { sh->disable_deblocking_filter_flag = get_bits1(gb); if (!sh->disable_deblocking_filter_flag) { int beta_offset_div2 = get_se_golomb(gb); int tc_offset_div2 = get_se_golomb(gb) ; if (beta_offset_div2 < -6 || beta_offset_div2 > 6 || tc_offset_div2 < -6 || tc_offset_div2 > 6) { av_log(s->avctx, AV_LOG_ERROR, "Invalid deblock filter offsets: %d, %d\n", beta_offset_div2, tc_offset_div2); return AVERROR_INVALIDDATA; } sh->beta_offset = beta_offset_div2 * 2; sh->tc_offset = tc_offset_div2 * 2; } } else { sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf; sh->beta_offset = s->ps.pps->beta_offset; sh->tc_offset = s->ps.pps->tc_offset; } } else { sh->disable_deblocking_filter_flag = 0; sh->beta_offset = 0; sh->tc_offset = 0; } if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag && (sh->slice_sample_adaptive_offset_flag[0] || sh->slice_sample_adaptive_offset_flag[1] || !sh->disable_deblocking_filter_flag)) { sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb); } else { sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag; } } else if (!s->slice_initialized) { av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n"); return AVERROR_INVALIDDATA; } sh->num_entry_point_offsets = 0; if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) { unsigned num_entry_point_offsets = get_ue_golomb_long(gb); if (num_entry_point_offsets > get_bits_left(gb)) { av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets); return AVERROR_INVALIDDATA; } sh->num_entry_point_offsets = num_entry_point_offsets; if (sh->num_entry_point_offsets > 0) { int 
offset_len = get_ue_golomb_long(gb) + 1; if (offset_len < 1 || offset_len > 32) { sh->num_entry_point_offsets = 0; av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len); return AVERROR_INVALIDDATA; } av_freep(&sh->entry_point_offset); av_freep(&sh->offset); av_freep(&sh->size); sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned)); sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int)); sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int)); if (!sh->entry_point_offset || !sh->offset || !sh->size) { sh->num_entry_point_offsets = 0; av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n"); return AVERROR(ENOMEM); } for (i = 0; i < sh->num_entry_point_offsets; i++) { unsigned val = get_bits_long(gb, offset_len); sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size } if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) { s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here s->threads_number = 1; } else s->enable_parallel_tiles = 0; } else s->enable_parallel_tiles = 0; } if (s->ps.pps->slice_header_extension_present_flag) { unsigned int length = get_ue_golomb_long(gb); if (length*8LL > get_bits_left(gb)) { av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n"); return AVERROR_INVALIDDATA; } for (i = 0; i < length; i++) skip_bits(gb, 8); // slice_header_extension_data_byte } sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta; if (sh->slice_qp > 51 || sh->slice_qp < -s->ps.sps->qp_bd_offset) { av_log(s->avctx, AV_LOG_ERROR, "The slice_qp %d is outside the valid range " "[%d, 51].\n", sh->slice_qp, -s->ps.sps->qp_bd_offset); return AVERROR_INVALIDDATA; } sh->slice_ctb_addr_rs = sh->slice_segment_addr; if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) { av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n"); return AVERROR_INVALIDDATA; } if (get_bits_left(gb) < 0) { av_log(s->avctx, AV_LOG_ERROR, "Overread slice header by %d bits\n", -get_bits_left(gb)); return AVERROR_INVALIDDATA; } s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag; if (!s->ps.pps->cu_qp_delta_enabled_flag) s->HEVClc->qp_y = s->sh.slice_qp; s->slice_initialized = 1; s->HEVClc->tu.cu_qp_offset_cb = 0; s->HEVClc->tu.cu_qp_offset_cr = 0; return 0; }
169,707
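The patch above reduces to one guard: when a slice claims to be the first slice of a picture but one has already been seen for the same frame, the parser now returns immediately (the slice gets skipped later) instead of noting the duplicate and continuing, so no decoder state is touched. A minimal, compilable C sketch of that guard pattern follows; the decoder struct and function names are invented for illustration and are not FFmpeg API.

#include <stdio.h>

struct decoder {
    int have_first_slice;   /* set once the first slice of the current frame is seen */
    int width, height;      /* state that must not be clobbered by a bogus duplicate */
};

/* Returns 0 on success, 1 to skip the slice while leaving existing state intact. */
static int parse_slice_header(struct decoder *d, int first_slice_in_pic_flag)
{
    if (first_slice_in_pic_flag && d->have_first_slice) {
        /* Duplicate "first slice" for the same frame: bail out before touching
         * any state, so later code never sees a half-reset decoder. */
        fprintf(stderr, "duplicate first slice, skipping\n");
        return 1;
    }
    if (first_slice_in_pic_flag)
        d->have_first_slice = 1;
    /* ... normal slice header parsing would continue here ... */
    return 0;
}

int main(void)
{
    struct decoder d = {0};
    printf("%d\n", parse_slice_header(&d, 1)); /* 0: first slice accepted */
    printf("%d\n", parse_slice_header(&d, 1)); /* 1: duplicate rejected early */
    return 0;
}

Returning before any state is written is what removes the NULL-dereference window: later code can no longer observe a context that was only partially reset for the duplicate slice.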
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember: respond with just the fixed code and no explanation.
Code: static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp, struct nfsd3_setaclargs *argp, struct nfsd_attrstat *resp) { struct inode *inode; svc_fh *fh; __be32 nfserr = 0; int error; dprintk("nfsd: SETACL(2acl) %s\n", SVCFH_fmt(&argp->fh)); fh = fh_copy(&resp->fh, &argp->fh); nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_SATTR); if (nfserr) goto out; inode = d_inode(fh->fh_dentry); if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) { error = -EOPNOTSUPP; goto out_errno; } error = fh_want_write(fh); if (error) goto out_errno; error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS); if (error) goto out_drop_write; error = inode->i_op->set_acl(inode, argp->acl_default, ACL_TYPE_DEFAULT); if (error) goto out_drop_write; fh_drop_write(fh); nfserr = fh_getattr(fh, &resp->stat); out: /* argp->acl_{access,default} may have been allocated in nfssvc_decode_setaclargs. */ posix_acl_release(argp->acl_access); posix_acl_release(argp->acl_default); return nfserr; out_drop_write: fh_drop_write(fh); out_errno: nfserr = nfserrno(error); goto out; } Commit Message: nfsd: check permissions when setting ACLs Use set_posix_acl, which includes proper permission checks, instead of calling ->set_acl directly. Without this anyone may be able to grant themselves permissions to a file by setting the ACL. Lock the inode to make the new checks atomic with respect to set_acl. (Also, nfsd was the only caller of set_acl not locking the inode, so I suspect this may fix other races.) This also simplifies the code, and ensures our ACLs are checked by posix_acl_valid. The permission checks and the inode locking were lost with commit 4ac7249e, which changed nfsd to use the set_acl inode operation directly instead of going through xattr handlers. Reported-by: David Sinquin <[email protected]> [[email protected]: use set_posix_acl] Fixes: 4ac7249e Cc: Christoph Hellwig <[email protected]> Cc: Al Viro <[email protected]> Cc: [email protected] Signed-off-by: J. Bruce Fields <[email protected]> CWE ID: CWE-284
static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp, struct nfsd3_setaclargs *argp, struct nfsd_attrstat *resp) { struct inode *inode; svc_fh *fh; __be32 nfserr = 0; int error; dprintk("nfsd: SETACL(2acl) %s\n", SVCFH_fmt(&argp->fh)); fh = fh_copy(&resp->fh, &argp->fh); nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_SATTR); if (nfserr) goto out; inode = d_inode(fh->fh_dentry); error = fh_want_write(fh); if (error) goto out_errno; fh_lock(fh); error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access); if (error) goto out_drop_lock; error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default); if (error) goto out_drop_lock; fh_unlock(fh); fh_drop_write(fh); nfserr = fh_getattr(fh, &resp->stat); out: /* argp->acl_{access,default} may have been allocated in nfssvc_decode_setaclargs. */ posix_acl_release(argp->acl_access); posix_acl_release(argp->acl_default); return nfserr; out_drop_lock: fh_unlock(fh); fh_drop_write(fh); out_errno: nfserr = nfserrno(error); goto out; }
167,447
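The fix above replaces the direct inode->i_op->set_acl() calls with set_posix_acl() and brackets both updates with fh_lock()/fh_unlock(), so the standard permission checks run atomically before anything is changed. Kernel code does not build standalone, so the sketch below shows the same missing-authorization idea (CWE-284) in user space: confirm the caller is allowed to modify the file before applying the change. The helper name and the chmod stand-in are illustrative assumptions, not nfsd code.

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Only the file's owner (or root) may change its permission metadata; the
 * real patch gets an equivalent check for free by going through set_posix_acl(). */
static int set_mode_checked(const char *path, mode_t mode)
{
    struct stat st;
    if (stat(path, &st) != 0)
        return -errno;
    if (geteuid() != 0 && geteuid() != st.st_uid)
        return -EPERM;                 /* caller is not authorized for this file */
    if (chmod(path, mode) != 0)
        return -errno;
    return 0;
}

int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }
    int rc = set_mode_checked(argv[1], 0644);
    printf("set_mode_checked: %d\n", rc);
    return rc ? 1 : 0;
}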
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember: respond with just the fixed code and no explanation.
Code: static INT AirPDcapScanForKeys( PAIRPDCAP_CONTEXT ctx, const guint8 *data, const guint mac_header_len, const guint tot_len, AIRPDCAP_SEC_ASSOCIATION_ID id ) { const UCHAR *addr; guint bodyLength; PAIRPDCAP_SEC_ASSOCIATION sta_sa; PAIRPDCAP_SEC_ASSOCIATION sa; guint offset = 0; const guint8 dot1x_header[] = { 0xAA, /* DSAP=SNAP */ 0xAA, /* SSAP=SNAP */ 0x03, /* Control field=Unnumbered frame */ 0x00, 0x00, 0x00, /* Org. code=encaps. Ethernet */ 0x88, 0x8E /* Type: 802.1X authentication */ }; const guint8 bt_dot1x_header[] = { 0xAA, /* DSAP=SNAP */ 0xAA, /* SSAP=SNAP */ 0x03, /* Control field=Unnumbered frame */ 0x00, 0x19, 0x58, /* Org. code=Bluetooth SIG */ 0x00, 0x03 /* Type: Bluetooth Security */ }; const guint8 tdls_header[] = { 0xAA, /* DSAP=SNAP */ 0xAA, /* SSAP=SNAP */ 0x03, /* Control field=Unnumbered frame */ 0x00, 0x00, 0x00, /* Org. code=encaps. Ethernet */ 0x89, 0x0D, /* Type: 802.11 - Fast Roaming Remote Request */ 0x02, /* Payload Type: TDLS */ 0X0C /* Action Category: TDLS */ }; const EAPOL_RSN_KEY *pEAPKey; #ifdef _DEBUG #define MSGBUF_LEN 255 CHAR msgbuf[MSGBUF_LEN]; #endif AIRPDCAP_DEBUG_TRACE_START("AirPDcapScanForKeys"); /* cache offset in the packet data */ offset = mac_header_len; /* check if the packet has an LLC header and the packet is 802.1X authentication (IEEE 802.1X-2004, pg. 24) */ if (memcmp(data+offset, dot1x_header, 8) == 0 || memcmp(data+offset, bt_dot1x_header, 8) == 0) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Authentication: EAPOL packet", AIRPDCAP_DEBUG_LEVEL_3); /* skip LLC header */ offset+=8; /* check if the packet is a EAPOL-Key (0x03) (IEEE 802.1X-2004, pg. 25) */ if (data[offset+1]!=3) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Not EAPOL-Key", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* get and check the body length (IEEE 802.1X-2004, pg. 25) */ bodyLength=pntoh16(data+offset+2); if ((tot_len-offset-4) < bodyLength) { /* Only check if frame is long enough for eapol header, ignore tailing garbage, see bug 9065 */ AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "EAPOL body too short", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* skip EAPOL MPDU and go to the first byte of the body */ offset+=4; pEAPKey = (const EAPOL_RSN_KEY *) (data+offset); /* check if the key descriptor type is valid (IEEE 802.1X-2004, pg. 
27) */ if (/*pEAPKey->type!=0x1 &&*/ /* RC4 Key Descriptor Type (deprecated) */ pEAPKey->type != AIRPDCAP_RSN_WPA2_KEY_DESCRIPTOR && /* IEEE 802.11 Key Descriptor Type (WPA2) */ pEAPKey->type != AIRPDCAP_RSN_WPA_KEY_DESCRIPTOR) /* 254 = RSN_KEY_DESCRIPTOR - WPA, */ { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Not valid key descriptor type", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* start with descriptor body */ offset+=1; /* search for a cached Security Association for current BSSID and AP */ sa = AirPDcapGetSaPtr(ctx, &id); if (sa == NULL){ AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "No SA for BSSID found", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_REQ_DATA; } /* It could be a Pairwise Key exchange, check */ if (AirPDcapRsna4WHandshake(ctx, data, sa, offset, tot_len) == AIRPDCAP_RET_SUCCESS_HANDSHAKE) return AIRPDCAP_RET_SUCCESS_HANDSHAKE; if (mac_header_len + GROUP_KEY_PAYLOAD_LEN_MIN > tot_len) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Message too short for Group Key", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* Verify the bitfields: Key = 0(groupwise) Mic = 1 Ack = 1 Secure = 1 */ if (AIRPDCAP_EAP_KEY(data[offset+1])!=0 || AIRPDCAP_EAP_ACK(data[offset+1])!=1 || AIRPDCAP_EAP_MIC(data[offset]) != 1 || AIRPDCAP_EAP_SEC(data[offset]) != 1){ AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Key bitfields not correct for Group Key", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* force STA address to be the broadcast MAC so we create an SA for the groupkey */ memcpy(id.sta, broadcast_mac, AIRPDCAP_MAC_LEN); /* get the Security Association structure for the broadcast MAC and AP */ sa = AirPDcapGetSaPtr(ctx, &id); if (sa == NULL){ return AIRPDCAP_RET_REQ_DATA; } /* Get the SA for the STA, since we need its pairwise key to decrpyt the group key */ /* get STA address */ if ( (addr=AirPDcapGetStaAddress((const AIRPDCAP_MAC_FRAME_ADDR4 *)(data))) != NULL) { memcpy(id.sta, addr, AIRPDCAP_MAC_LEN); #ifdef _DEBUG g_snprintf(msgbuf, MSGBUF_LEN, "ST_MAC: %2X.%2X.%2X.%2X.%2X.%2X\t", id.sta[0],id.sta[1],id.sta[2],id.sta[3],id.sta[4],id.sta[5]); #endif AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", msgbuf, AIRPDCAP_DEBUG_LEVEL_3); } else { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "SA not found", AIRPDCAP_DEBUG_LEVEL_5); return AIRPDCAP_RET_REQ_DATA; } sta_sa = AirPDcapGetSaPtr(ctx, &id); if (sta_sa == NULL){ return AIRPDCAP_RET_REQ_DATA; } /* Try to extract the group key and install it in the SA */ return (AirPDcapDecryptWPABroadcastKey(pEAPKey, sta_sa->wpa.ptk+16, sa, tot_len-offset+1)); } else if (memcmp(data+offset, tdls_header, 10) == 0) { const guint8 *initiator, *responder; guint8 action; guint status, offset_rsne = 0, offset_fte = 0, offset_link = 0, offset_timeout = 0; AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Authentication: TDLS Action Frame", AIRPDCAP_DEBUG_LEVEL_3); /* skip LLC header */ offset+=10; /* check if the packet is a TDLS response or confirm */ action = data[offset]; if (action!=1 && action!=2) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Not Response nor confirm", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* check status */ offset++; status=pntoh16(data+offset); if (status!=0) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "TDLS setup not successfull", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* skip Token + capabilities */ offset+=5; /* search for RSN, Fast BSS Transition, Link Identifier and Timeout 
Interval IEs */ while(offset < (tot_len - 2)) { if (data[offset] == 48) { offset_rsne = offset; } else if (data[offset] == 55) { offset_fte = offset; } else if (data[offset] == 56) { offset_timeout = offset; } else if (data[offset] == 101) { offset_link = offset; } if (tot_len < offset + data[offset + 1] + 2) { return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } offset += data[offset + 1] + 2; } if (offset_rsne == 0 || offset_fte == 0 || offset_timeout == 0 || offset_link == 0) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Cannot Find all necessary IEs", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Found RSNE/Fast BSS/Timeout Interval/Link IEs", AIRPDCAP_DEBUG_LEVEL_3); /* Will create a Security Association between 2 STA. Need to get both MAC address */ initiator = &data[offset_link + 8]; responder = &data[offset_link + 14]; if (memcmp(initiator, responder, AIRPDCAP_MAC_LEN) < 0) { memcpy(id.sta, initiator, AIRPDCAP_MAC_LEN); memcpy(id.bssid, responder, AIRPDCAP_MAC_LEN); } else { memcpy(id.sta, responder, AIRPDCAP_MAC_LEN); memcpy(id.bssid, initiator, AIRPDCAP_MAC_LEN); } sa = AirPDcapGetSaPtr(ctx, &id); if (sa == NULL){ return AIRPDCAP_RET_REQ_DATA; } if (sa->validKey) { if (memcmp(sa->wpa.nonce, data + offset_fte + 52, AIRPDCAP_WPA_NONCE_LEN) == 0) { /* Already have valid key for this SA, no need to redo key derivation */ return AIRPDCAP_RET_SUCCESS_HANDSHAKE; } else { /* We are opening a new session with the same two STA, save previous sa */ AIRPDCAP_SEC_ASSOCIATION *tmp_sa = g_new(AIRPDCAP_SEC_ASSOCIATION, 1); memcpy(tmp_sa, sa, sizeof(AIRPDCAP_SEC_ASSOCIATION)); sa->next=tmp_sa; sa->validKey = FALSE; } } if (AirPDcapTDLSDeriveKey(sa, data, offset_rsne, offset_fte, offset_timeout, offset_link, action) == AIRPDCAP_RET_SUCCESS) { AIRPDCAP_DEBUG_TRACE_END("AirPDcapScanForKeys"); return AIRPDCAP_RET_SUCCESS_HANDSHAKE; } } else { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Skipping: not an EAPOL packet", AIRPDCAP_DEBUG_LEVEL_3); } AIRPDCAP_DEBUG_TRACE_END("AirPDcapScanForKeys"); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } Commit Message: Make sure EAPOL body is big enough for a EAPOL_RSN_KEY. A pointer to a EAPOL_RSN_KEY is set on the packet presuming the whole EAPOL_RSN_KEY is there. That's not always the case for fuzzed/malicious captures. Bug: 11585 Change-Id: Ib94b8aceef444c7820e43b969596efdb8dbecccd Reviewed-on: https://code.wireshark.org/review/15540 Reviewed-by: Michael Mann <[email protected]> Petri-Dish: Michael Mann <[email protected]> Tested-by: Petri Dish Buildbot <[email protected]> Reviewed-by: Anders Broman <[email protected]> CWE ID: CWE-20
static INT AirPDcapScanForKeys( PAIRPDCAP_CONTEXT ctx, const guint8 *data, const guint mac_header_len, const guint tot_len, AIRPDCAP_SEC_ASSOCIATION_ID id ) { const UCHAR *addr; guint bodyLength; PAIRPDCAP_SEC_ASSOCIATION sta_sa; PAIRPDCAP_SEC_ASSOCIATION sa; guint offset = 0; const guint8 dot1x_header[] = { 0xAA, /* DSAP=SNAP */ 0xAA, /* SSAP=SNAP */ 0x03, /* Control field=Unnumbered frame */ 0x00, 0x00, 0x00, /* Org. code=encaps. Ethernet */ 0x88, 0x8E /* Type: 802.1X authentication */ }; const guint8 bt_dot1x_header[] = { 0xAA, /* DSAP=SNAP */ 0xAA, /* SSAP=SNAP */ 0x03, /* Control field=Unnumbered frame */ 0x00, 0x19, 0x58, /* Org. code=Bluetooth SIG */ 0x00, 0x03 /* Type: Bluetooth Security */ }; const guint8 tdls_header[] = { 0xAA, /* DSAP=SNAP */ 0xAA, /* SSAP=SNAP */ 0x03, /* Control field=Unnumbered frame */ 0x00, 0x00, 0x00, /* Org. code=encaps. Ethernet */ 0x89, 0x0D, /* Type: 802.11 - Fast Roaming Remote Request */ 0x02, /* Payload Type: TDLS */ 0X0C /* Action Category: TDLS */ }; const EAPOL_RSN_KEY *pEAPKey; #ifdef _DEBUG #define MSGBUF_LEN 255 CHAR msgbuf[MSGBUF_LEN]; #endif AIRPDCAP_DEBUG_TRACE_START("AirPDcapScanForKeys"); /* cache offset in the packet data */ offset = mac_header_len; /* check if the packet has an LLC header and the packet is 802.1X authentication (IEEE 802.1X-2004, pg. 24) */ if (memcmp(data+offset, dot1x_header, 8) == 0 || memcmp(data+offset, bt_dot1x_header, 8) == 0) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Authentication: EAPOL packet", AIRPDCAP_DEBUG_LEVEL_3); /* skip LLC header */ offset+=8; /* check if the packet is a EAPOL-Key (0x03) (IEEE 802.1X-2004, pg. 25) */ if (data[offset+1]!=3) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Not EAPOL-Key", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* get and check the body length (IEEE 802.1X-2004, pg. 25) */ bodyLength=pntoh16(data+offset+2); if (((tot_len-offset-4) < bodyLength) || (bodyLength < sizeof(EAPOL_RSN_KEY))) { /* Only check if frame is long enough for eapol header, ignore tailing garbage, see bug 9065 */ AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "EAPOL body too short", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* skip EAPOL MPDU and go to the first byte of the body */ offset+=4; pEAPKey = (const EAPOL_RSN_KEY *) (data+offset); /* check if the key descriptor type is valid (IEEE 802.1X-2004, pg. 
27) */ if (/*pEAPKey->type!=0x1 &&*/ /* RC4 Key Descriptor Type (deprecated) */ pEAPKey->type != AIRPDCAP_RSN_WPA2_KEY_DESCRIPTOR && /* IEEE 802.11 Key Descriptor Type (WPA2) */ pEAPKey->type != AIRPDCAP_RSN_WPA_KEY_DESCRIPTOR) /* 254 = RSN_KEY_DESCRIPTOR - WPA, */ { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Not valid key descriptor type", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* start with descriptor body */ offset+=1; /* search for a cached Security Association for current BSSID and AP */ sa = AirPDcapGetSaPtr(ctx, &id); if (sa == NULL){ AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "No SA for BSSID found", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_REQ_DATA; } /* It could be a Pairwise Key exchange, check */ if (AirPDcapRsna4WHandshake(ctx, data, sa, offset, tot_len) == AIRPDCAP_RET_SUCCESS_HANDSHAKE) return AIRPDCAP_RET_SUCCESS_HANDSHAKE; if (mac_header_len + GROUP_KEY_PAYLOAD_LEN_MIN > tot_len) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Message too short for Group Key", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* Verify the bitfields: Key = 0(groupwise) Mic = 1 Ack = 1 Secure = 1 */ if (AIRPDCAP_EAP_KEY(data[offset+1])!=0 || AIRPDCAP_EAP_ACK(data[offset+1])!=1 || AIRPDCAP_EAP_MIC(data[offset]) != 1 || AIRPDCAP_EAP_SEC(data[offset]) != 1){ AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Key bitfields not correct for Group Key", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* force STA address to be the broadcast MAC so we create an SA for the groupkey */ memcpy(id.sta, broadcast_mac, AIRPDCAP_MAC_LEN); /* get the Security Association structure for the broadcast MAC and AP */ sa = AirPDcapGetSaPtr(ctx, &id); if (sa == NULL){ return AIRPDCAP_RET_REQ_DATA; } /* Get the SA for the STA, since we need its pairwise key to decrpyt the group key */ /* get STA address */ if ( (addr=AirPDcapGetStaAddress((const AIRPDCAP_MAC_FRAME_ADDR4 *)(data))) != NULL) { memcpy(id.sta, addr, AIRPDCAP_MAC_LEN); #ifdef _DEBUG g_snprintf(msgbuf, MSGBUF_LEN, "ST_MAC: %2X.%2X.%2X.%2X.%2X.%2X\t", id.sta[0],id.sta[1],id.sta[2],id.sta[3],id.sta[4],id.sta[5]); #endif AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", msgbuf, AIRPDCAP_DEBUG_LEVEL_3); } else { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "SA not found", AIRPDCAP_DEBUG_LEVEL_5); return AIRPDCAP_RET_REQ_DATA; } sta_sa = AirPDcapGetSaPtr(ctx, &id); if (sta_sa == NULL){ return AIRPDCAP_RET_REQ_DATA; } /* Try to extract the group key and install it in the SA */ return (AirPDcapDecryptWPABroadcastKey(pEAPKey, sta_sa->wpa.ptk+16, sa, tot_len-offset+1)); } else if (memcmp(data+offset, tdls_header, 10) == 0) { const guint8 *initiator, *responder; guint8 action; guint status, offset_rsne = 0, offset_fte = 0, offset_link = 0, offset_timeout = 0; AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Authentication: TDLS Action Frame", AIRPDCAP_DEBUG_LEVEL_3); /* skip LLC header */ offset+=10; /* check if the packet is a TDLS response or confirm */ action = data[offset]; if (action!=1 && action!=2) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Not Response nor confirm", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* check status */ offset++; status=pntoh16(data+offset); if (status!=0) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "TDLS setup not successfull", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } /* skip Token + capabilities */ offset+=5; /* search for RSN, Fast BSS Transition, Link Identifier and Timeout 
Interval IEs */ while(offset < (tot_len - 2)) { if (data[offset] == 48) { offset_rsne = offset; } else if (data[offset] == 55) { offset_fte = offset; } else if (data[offset] == 56) { offset_timeout = offset; } else if (data[offset] == 101) { offset_link = offset; } if (tot_len < offset + data[offset + 1] + 2) { return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } offset += data[offset + 1] + 2; } if (offset_rsne == 0 || offset_fte == 0 || offset_timeout == 0 || offset_link == 0) { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Cannot Find all necessary IEs", AIRPDCAP_DEBUG_LEVEL_3); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; } AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Found RSNE/Fast BSS/Timeout Interval/Link IEs", AIRPDCAP_DEBUG_LEVEL_3); /* Will create a Security Association between 2 STA. Need to get both MAC address */ initiator = &data[offset_link + 8]; responder = &data[offset_link + 14]; if (memcmp(initiator, responder, AIRPDCAP_MAC_LEN) < 0) { memcpy(id.sta, initiator, AIRPDCAP_MAC_LEN); memcpy(id.bssid, responder, AIRPDCAP_MAC_LEN); } else { memcpy(id.sta, responder, AIRPDCAP_MAC_LEN); memcpy(id.bssid, initiator, AIRPDCAP_MAC_LEN); } sa = AirPDcapGetSaPtr(ctx, &id); if (sa == NULL){ return AIRPDCAP_RET_REQ_DATA; } if (sa->validKey) { if (memcmp(sa->wpa.nonce, data + offset_fte + 52, AIRPDCAP_WPA_NONCE_LEN) == 0) { /* Already have valid key for this SA, no need to redo key derivation */ return AIRPDCAP_RET_SUCCESS_HANDSHAKE; } else { /* We are opening a new session with the same two STA, save previous sa */ AIRPDCAP_SEC_ASSOCIATION *tmp_sa = g_new(AIRPDCAP_SEC_ASSOCIATION, 1); memcpy(tmp_sa, sa, sizeof(AIRPDCAP_SEC_ASSOCIATION)); sa->next=tmp_sa; sa->validKey = FALSE; } } if (AirPDcapTDLSDeriveKey(sa, data, offset_rsne, offset_fte, offset_timeout, offset_link, action) == AIRPDCAP_RET_SUCCESS) { AIRPDCAP_DEBUG_TRACE_END("AirPDcapScanForKeys"); return AIRPDCAP_RET_SUCCESS_HANDSHAKE; } } else { AIRPDCAP_DEBUG_PRINT_LINE("AirPDcapScanForKeys", "Skipping: not an EAPOL packet", AIRPDCAP_DEBUG_LEVEL_3); } AIRPDCAP_DEBUG_TRACE_END("AirPDcapScanForKeys"); return AIRPDCAP_RET_NO_VALID_HANDSHAKE; }
167,158
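The only functional change in this fix is the extra bodyLength < sizeof(EAPOL_RSN_KEY) test, so the EAPOL_RSN_KEY pointer is never overlaid on a body that is too short to contain the whole struct. Below is a small, compilable sketch of that length-validation pattern; the key_hdr struct and parse_key() function are made up for the example and are not Wireshark code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical fixed-size header that gets overlaid on received bytes. */
struct key_hdr {
    uint8_t  type;
    uint8_t  info;
    uint16_t key_len;
};

/* Overlay the struct only if the advertised body length both fits inside the
 * packet and is large enough to cover the whole struct. */
static const struct key_hdr *parse_key(const uint8_t *pkt, size_t pkt_len)
{
    if (pkt_len < 4)
        return NULL;
    uint16_t body_len = (uint16_t)((pkt[2] << 8) | pkt[3]);
    if (body_len > pkt_len - 4 || body_len < sizeof(struct key_hdr))
        return NULL;                  /* too short: refuse to overlay the struct */
    return (const struct key_hdr *)(pkt + 4);
}

int main(void)
{
    uint8_t ok[8]   = {0x01, 0x03, 0x00, 0x04, 0x02, 0x00, 0x00, 0x10};
    uint8_t tiny[5] = {0x01, 0x03, 0x00, 0x01, 0x02};  /* body claims only 1 byte */
    printf("%s\n", parse_key(ok, sizeof(ok))     ? "parsed" : "rejected");
    printf("%s\n", parse_key(tiny, sizeof(tiny)) ? "parsed" : "rejected");
    return 0;
}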
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember: respond with just the fixed code and no explanation.
Code: void HTMLDocument::removeItemFromMap(HashCountedSet<StringImpl*>& map, const AtomicString& name) { if (name.isEmpty()) return; map.remove(name.impl()); if (Frame* f = frame()) f->script()->namedItemRemoved(this, name); } Commit Message: Fix tracking of the id attribute string if it is shared across elements. The patch to remove AtomicStringImpl: http://src.chromium.org/viewvc/blink?view=rev&rev=154790 Exposed a lifetime issue with strings for id attributes. We simply need to use AtomicString. BUG=290566 Review URL: https://codereview.chromium.org/33793004 git-svn-id: svn://svn.chromium.org/blink/trunk@160250 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-399
void HTMLDocument::removeItemFromMap(HashCountedSet<AtomicString>& map, const AtomicString& name) { if (name.isEmpty()) return; map.remove(name); if (Frame* f = frame()) f->script()->namedItemRemoved(this, name); }
171,157
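Per the commit message, the bug was keying the named-item map with a bare StringImpl* whose lifetime was not tied to the map entry, and the fix keys it with AtomicString so the string stays alive while it is registered. The C sketch below illustrates the same ownership idea: the container stores an owned copy of the key rather than a borrowed pointer. The single-slot "map" and its function names are deliberately tiny stand-ins, not Blink code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* One-slot counted set keyed by an owned copy of the string. Storing the
 * caller's pointer instead would dangle as soon as the caller frees it, which
 * is the lifetime-bug class addressed by the patch. */
struct name_slot {
    char *key;    /* owned via strdup(), freed when the count drops to zero */
    int   count;
};

static void add_item(struct name_slot *s, const char *name)
{
    if (!s->key) {
        s->key = strdup(name);   /* own the key, do not borrow it */
        s->count = 0;
    }
    s->count++;
}

static void remove_item(struct name_slot *s, const char *name)
{
    if (s->key && strcmp(s->key, name) == 0 && --s->count == 0) {
        free(s->key);
        s->key = NULL;
    }
}

int main(void)
{
    struct name_slot slot = {0};
    char *shared = strdup("header");
    add_item(&slot, shared);
    free(shared);                  /* the caller's buffer goes away... */
    remove_item(&slot, "header");  /* ...but the owned key is still valid */
    printf("slot empty: %s\n", slot.key ? "no" : "yes");
    return 0;
}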
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember: respond with just the fixed code and no explanation.
Code: static int cdxl_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt) { CDXLVideoContext *c = avctx->priv_data; AVFrame * const p = data; int ret, w, h, encoding, aligned_width, buf_size = pkt->size; const uint8_t *buf = pkt->data; if (buf_size < 32) return AVERROR_INVALIDDATA; encoding = buf[1] & 7; c->format = buf[1] & 0xE0; w = AV_RB16(&buf[14]); h = AV_RB16(&buf[16]); c->bpp = buf[19]; c->palette_size = AV_RB16(&buf[20]); c->palette = buf + 32; c->video = c->palette + c->palette_size; c->video_size = buf_size - c->palette_size - 32; if (c->palette_size > 512) return AVERROR_INVALIDDATA; if (buf_size < c->palette_size + 32) return AVERROR_INVALIDDATA; if (c->bpp < 1) return AVERROR_INVALIDDATA; if (c->format != BIT_PLANAR && c->format != BIT_LINE && c->format != CHUNKY) { avpriv_request_sample(avctx, "Pixel format 0x%0x", c->format); return AVERROR_PATCHWELCOME; } if ((ret = ff_set_dimensions(avctx, w, h)) < 0) return ret; if (c->format == CHUNKY) aligned_width = avctx->width; else aligned_width = FFALIGN(c->avctx->width, 16); c->padded_bits = aligned_width - c->avctx->width; if (c->video_size < aligned_width * avctx->height * (int64_t)c->bpp / 8) return AVERROR_INVALIDDATA; if (!encoding && c->palette_size && c->bpp <= 8) { avctx->pix_fmt = AV_PIX_FMT_PAL8; } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) { if (c->palette_size != (1 << (c->bpp - 1))) return AVERROR_INVALIDDATA; avctx->pix_fmt = AV_PIX_FMT_BGR24; } else if (!encoding && c->bpp == 24 && c->format == CHUNKY && !c->palette_size) { avctx->pix_fmt = AV_PIX_FMT_RGB24; } else { avpriv_request_sample(avctx, "Encoding %d, bpp %d and format 0x%x", encoding, c->bpp, c->format); return AVERROR_PATCHWELCOME; } if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->pict_type = AV_PICTURE_TYPE_I; if (encoding) { av_fast_padded_malloc(&c->new_video, &c->new_video_size, h * w + AV_INPUT_BUFFER_PADDING_SIZE); if (!c->new_video) return AVERROR(ENOMEM); if (c->bpp == 8) cdxl_decode_ham8(c, p); else cdxl_decode_ham6(c, p); } else if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { cdxl_decode_rgb(c, p); } else { cdxl_decode_raw(c, p); } *got_frame = 1; return buf_size; } Commit Message: avcodec/cdxl: Check format parameter Fixes out of array access Fixes: 1378/clusterfuzz-testcase-minimized-5715088008806400 Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg Signed-off-by: Michael Niedermayer <[email protected]> CWE ID: CWE-119
static int cdxl_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt) { CDXLVideoContext *c = avctx->priv_data; AVFrame * const p = data; int ret, w, h, encoding, aligned_width, buf_size = pkt->size; const uint8_t *buf = pkt->data; if (buf_size < 32) return AVERROR_INVALIDDATA; encoding = buf[1] & 7; c->format = buf[1] & 0xE0; w = AV_RB16(&buf[14]); h = AV_RB16(&buf[16]); c->bpp = buf[19]; c->palette_size = AV_RB16(&buf[20]); c->palette = buf + 32; c->video = c->palette + c->palette_size; c->video_size = buf_size - c->palette_size - 32; if (c->palette_size > 512) return AVERROR_INVALIDDATA; if (buf_size < c->palette_size + 32) return AVERROR_INVALIDDATA; if (c->bpp < 1) return AVERROR_INVALIDDATA; if (c->format != BIT_PLANAR && c->format != BIT_LINE && c->format != CHUNKY) { avpriv_request_sample(avctx, "Pixel format 0x%0x", c->format); return AVERROR_PATCHWELCOME; } if ((ret = ff_set_dimensions(avctx, w, h)) < 0) return ret; if (c->format == CHUNKY) aligned_width = avctx->width; else aligned_width = FFALIGN(c->avctx->width, 16); c->padded_bits = aligned_width - c->avctx->width; if (c->video_size < aligned_width * avctx->height * (int64_t)c->bpp / 8) return AVERROR_INVALIDDATA; if (!encoding && c->palette_size && c->bpp <= 8 && c->format != CHUNKY) { avctx->pix_fmt = AV_PIX_FMT_PAL8; } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) { if (c->palette_size != (1 << (c->bpp - 1))) return AVERROR_INVALIDDATA; avctx->pix_fmt = AV_PIX_FMT_BGR24; } else if (!encoding && c->bpp == 24 && c->format == CHUNKY && !c->palette_size) { avctx->pix_fmt = AV_PIX_FMT_RGB24; } else { avpriv_request_sample(avctx, "Encoding %d, bpp %d and format 0x%x", encoding, c->bpp, c->format); return AVERROR_PATCHWELCOME; } if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->pict_type = AV_PICTURE_TYPE_I; if (encoding) { av_fast_padded_malloc(&c->new_video, &c->new_video_size, h * w + AV_INPUT_BUFFER_PADDING_SIZE); if (!c->new_video) return AVERROR(ENOMEM); if (c->bpp == 8) cdxl_decode_ham8(c, p); else cdxl_decode_ham6(c, p); } else if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { cdxl_decode_rgb(c, p); } else { cdxl_decode_raw(c, p); } *got_frame = 1; return buf_size; }
170,042
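The fix tightens format selection: the palettized (PAL8) path is now chosen only when c->format != CHUNKY, so chunky data can no longer be routed through stride math meant for planar layouts. A short sketch of that reject-inconsistent-header-combinations pattern is below; the enum and function names are invented for the example, not FFmpeg identifiers.

#include <stdio.h>

enum pix_layout { PLANAR = 0, CHUNKY = 1 };

/* Commit to a decode path only when the header fields are mutually consistent;
 * anything else is refused instead of falling through to a path whose buffer
 * math does not match the payload. */
static int pick_path(enum pix_layout layout, int bpp, int has_palette)
{
    if (has_palette && bpp <= 8 && layout != CHUNKY)
        return 0;                      /* palettized planar path */
    if (!has_palette && bpp == 24 && layout == CHUNKY)
        return 1;                      /* raw RGB chunky path */
    return -1;                         /* unsupported combination: reject */
}

int main(void)
{
    printf("%d\n", pick_path(PLANAR, 8, 1));   /* 0 */
    printf("%d\n", pick_path(CHUNKY, 8, 1));   /* -1: the combination behind the bug */
    printf("%d\n", pick_path(CHUNKY, 24, 0));  /* 1 */
    return 0;
}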
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember: respond with just the fixed code and no explanation.
Code: long Segment::DoParseNext( const Cluster*& pResult, long long& pos, long& len) { long long total, avail; long status = m_pReader->Length(&total, &avail); if (status < 0) //error return status; assert((total < 0) || (avail <= total)); const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size; long long off_next = 0; long long cluster_size = -1; for (;;) { if ((total >= 0) && (pos >= total)) return 1; //EOF if ((segment_stop >= 0) && (pos >= segment_stop)) return 1; //EOF if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; //absolute const long long idoff = pos - m_start; //relative const long long id = ReadUInt(m_pReader, idpos, len); //absolute if (id < 0) //error return static_cast<long>(id); if (id == 0) //weird return -1; //generic error pos += len; //consume ID if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) //error return static_cast<long>(size); pos += len; //consume length of size of element if (size == 0) //weird continue; const long long unknown_size = (1LL << (7 * len)) - 1; if ((segment_stop >= 0) && (size != unknown_size) && ((pos + size) > segment_stop)) { return E_FILE_FORMAT_INVALID; } if (id == 0x0C53BB6B) //Cues ID { if (size == unknown_size) return E_FILE_FORMAT_INVALID; const long long element_stop = pos + size; if ((segment_stop >= 0) && (element_stop > segment_stop)) return E_FILE_FORMAT_INVALID; const long long element_start = idpos; const long long element_size = element_stop - element_start; if (m_pCues == NULL) { m_pCues = new Cues(this, pos, size, element_start, element_size); assert(m_pCues); //TODO } pos += size; //consume payload assert((segment_stop < 0) || (pos <= segment_stop)); continue; } if (id != 0x0F43B675) //not a Cluster ID { if (size == unknown_size) return E_FILE_FORMAT_INVALID; pos += size; //consume payload assert((segment_stop < 0) || (pos <= segment_stop)); continue; } #if 0 //this is commented-out to support incremental cluster parsing len = static_cast<long>(size); if (element_stop > avail) return E_BUFFER_NOT_FULL; #endif off_next = idoff; if (size != unknown_size) cluster_size = size; break; } assert(off_next > 0); //have cluster Cluster** const ii = m_clusters + m_clusterCount; Cluster** i = ii; Cluster** const jj = ii + m_clusterPreloadCount; Cluster** j = jj; while (i < j) { Cluster** const k = i + (j - i) / 2; assert(k < jj); const Cluster* const pNext = *k; assert(pNext); assert(pNext->m_index < 0); pos = pNext->GetPosition(); assert(pos >= 0); if (pos < off_next) i = k + 1; else if (pos > off_next) j = k; else { pResult = pNext; return 0; //success } } assert(i == j); long long pos_; long len_; status = Cluster::HasBlockEntries(this, off_next, pos_, len_); if (status < 0) //error or underflow { pos = pos_; len = len_; return status; } if (status > 0) //means "found at least one block entry" { Cluster* const pNext = 
Cluster::Create(this, -1, //preloaded off_next); assert(pNext); const ptrdiff_t idx_next = i - m_clusters; //insertion position PreloadCluster(pNext, idx_next); assert(m_clusters); assert(idx_next < m_clusterSize); assert(m_clusters[idx_next] == pNext); pResult = pNext; return 0; //success } if (cluster_size < 0) //unknown size { const long long payload_pos = pos; //absolute pos of cluster payload for (;;) //determine cluster size { if ((total >= 0) && (pos >= total)) break; if ((segment_stop >= 0) && (pos >= segment_stop)) break; //no more clusters if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; const long long id = ReadUInt(m_pReader, idpos, len); if (id < 0) //error (or underflow) return static_cast<long>(id); if (id == 0x0F43B675) //Cluster ID break; if (id == 0x0C53BB6B) //Cues ID break; pos += len; //consume ID (of sub-element) if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) //error return static_cast<long>(result); if (result > 0) //weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) //error return static_cast<long>(size); pos += len; //consume size field of element if (size == 0) //weird continue; const long long unknown_size = (1LL << (7 * len)) - 1; if (size == unknown_size) return E_FILE_FORMAT_INVALID; //not allowed for sub-elements if ((segment_stop >= 0) && ((pos + size) > segment_stop)) //weird return E_FILE_FORMAT_INVALID; pos += size; //consume payload of sub-element assert((segment_stop < 0) || (pos <= segment_stop)); } //determine cluster size cluster_size = pos - payload_pos; assert(cluster_size >= 0); //TODO: handle cluster_size = 0 pos = payload_pos; //reset and re-parse original cluster } pos += cluster_size; //consume payload assert((segment_stop < 0) || (pos <= segment_stop)); return 2; //try to find a cluster that follows next } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
long Segment::DoParseNext( pResult = 0; if (pCurr->m_index >= 0) { // loaded (not merely preloaded) assert(m_clusters[pCurr->m_index] == pCurr); const long next_idx = pCurr->m_index + 1; if (next_idx < m_clusterCount) { pResult = m_clusters[next_idx]; return 0; // success } // curr cluster is last among loaded const long result = LoadCluster(pos, len); if (result < 0) // error or underflow return result; if (result > 0) // no more clusters { // pResult = &m_eos; return 1; } pResult = GetLast(); return 0; // success } assert(m_pos > 0); long long total, avail; long status = m_pReader->Length(&total, &avail); if (status < 0) // error return status; assert((total < 0) || (avail <= total)); const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size; // interrogate curr cluster pos = pCurr->m_element_start; if (pCurr->m_element_size >= 0) pos += pCurr->m_element_size; else { if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long id = ReadUInt(m_pReader, pos, len); if (id != 0x0F43B675) // weird: not Cluster ID return -1; pos += len; // consume ID // Read Size if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume size field const long long unknown_size = (1LL << (7 * len)) - 1; if (size == unknown_size) // TODO: should never happen return E_FILE_FORMAT_INVALID; // TODO: resolve this // assert((pCurr->m_size <= 0) || (pCurr->m_size == size)); if ((segment_stop >= 0) && ((pos + size) > segment_stop)) return E_FILE_FORMAT_INVALID; // Pos now points to start of payload pos += size; // consume payload (that is, the current cluster) assert((segment_stop < 0) || (pos <= segment_stop)); // By consuming the payload, we are assuming that the curr // cluster isn't interesting. That is, we don't bother checking // whether the payload of the curr cluster is less than what // happens to be available (obtained via IMkvReader::Length). // Presumably the caller has already dispensed with the current // cluster, and really does want the next cluster. } // pos now points to just beyond the last fully-loaded cluster for (;;) { const long status = DoParseNext(pResult, pos, len); if (status <= 1) return status; } } long Segment::DoParseNext(const Cluster*& pResult, long long& pos, long& len) { long long total, avail; long status = m_pReader->Length(&total, &avail); if (status < 0) // error return status; assert((total < 0) || (avail <= total)); const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size; // Parse next cluster. This is strictly a parsing activity. // Creation of a new cluster object happens later, after the // parsing is done. 
long long off_next = 0; long long cluster_size = -1; for (;;) { if ((total >= 0) && (pos >= total)) return 1; // EOF if ((segment_stop >= 0) && (pos >= segment_stop)) return 1; // EOF if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; // absolute const long long idoff = pos - m_start; // relative const long long id = ReadUInt(m_pReader, idpos, len); // absolute if (id < 0) // error return static_cast<long>(id); if (id == 0) // weird return -1; // generic error pos += len; // consume ID // Read Size if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume length of size of element // Pos now points to start of payload if (size == 0) // weird continue; const long long unknown_size = (1LL << (7 * len)) - 1; if ((segment_stop >= 0) && (size != unknown_size) && ((pos + size) > segment_stop)) { return E_FILE_FORMAT_INVALID; } if (id == 0x0C53BB6B) { // Cues ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; const long long element_stop = pos + size; if ((segment_stop >= 0) && (element_stop > segment_stop)) return E_FILE_FORMAT_INVALID; const long long element_start = idpos; const long long element_size = element_stop - element_start; if (m_pCues == NULL) { m_pCues = new Cues(this, pos, size, element_start, element_size); assert(m_pCues); // TODO } pos += size; // consume payload assert((segment_stop < 0) || (pos <= segment_stop)); continue; } if (id != 0x0F43B675) { // not a Cluster ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; pos += size; // consume payload assert((segment_stop < 0) || (pos <= segment_stop)); continue; } #if 0 // this is commented-out to support incremental cluster parsing len = static_cast<long>(size); if (element_stop > avail) return E_BUFFER_NOT_FULL; #endif // We have a cluster. off_next = idoff; if (size != unknown_size) cluster_size = size; break; } assert(off_next > 0); // have cluster // We have parsed the next cluster. // We have not created a cluster object yet. What we need // to do now is determine whether it has already be preloaded //(in which case, an object for this cluster has already been // created), and if not, create a new cluster object. Cluster** const ii = m_clusters + m_clusterCount; Cluster** i = ii; Cluster** const jj = ii + m_clusterPreloadCount; Cluster** j = jj; while (i < j) { // INVARIANT: //[0, i) < pos_next //[i, j) ? 
//[j, jj) > pos_next Cluster** const k = i + (j - i) / 2; assert(k < jj); const Cluster* const pNext = *k; assert(pNext); assert(pNext->m_index < 0); pos = pNext->GetPosition(); assert(pos >= 0); if (pos < off_next) i = k + 1; else if (pos > off_next) j = k; else { pResult = pNext; return 0; // success } } assert(i == j); long long pos_; long len_; status = Cluster::HasBlockEntries(this, off_next, pos_, len_); if (status < 0) { // error or underflow pos = pos_; len = len_; return status; } if (status > 0) { // means "found at least one block entry" Cluster* const pNext = Cluster::Create(this, -1, // preloaded off_next); // element_size); assert(pNext); const ptrdiff_t idx_next = i - m_clusters; // insertion position PreloadCluster(pNext, idx_next); assert(m_clusters); assert(idx_next < m_clusterSize); assert(m_clusters[idx_next] == pNext); pResult = pNext; return 0; // success } // status == 0 means "no block entries found" if (cluster_size < 0) { // unknown size const long long payload_pos = pos; // absolute pos of cluster payload for (;;) { // determine cluster size if ((total >= 0) && (pos >= total)) break; if ((segment_stop >= 0) && (pos >= segment_stop)) break; // no more clusters // Read ID if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; const long long id = ReadUInt(m_pReader, idpos, len); if (id < 0) // error (or underflow) return static_cast<long>(id); // This is the distinguished set of ID's we use to determine // that we have exhausted the sub-element's inside the cluster // whose ID we parsed earlier. if (id == 0x0F43B675) // Cluster ID break; if (id == 0x0C53BB6B) // Cues ID break; pos += len; // consume ID (of sub-element) // Read Size if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume size field of element // pos now points to start of sub-element's payload if (size == 0) // weird continue; const long long unknown_size = (1LL << (7 * len)) - 1; if (size == unknown_size) return E_FILE_FORMAT_INVALID; // not allowed for sub-elements if ((segment_stop >= 0) && ((pos + size) > segment_stop)) // weird return E_FILE_FORMAT_INVALID; pos += size; // consume payload of sub-element assert((segment_stop < 0) || (pos <= segment_stop)); } // determine cluster size cluster_size = pos - payload_pos; assert(cluster_size >= 0); // TODO: handle cluster_size = 0 pos = payload_pos; // reset and re-parse original cluster } pos += cluster_size; // consume payload assert((segment_stop < 0) || (pos <= segment_stop)); return 2; // try to find a cluster that follows next }
174,266
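This entry's commit is a bulk roll of upstream libwebm rather than a single targeted patch, so there is no one-line change to point at; the bug class (CWE-119) in container parsers like this is trusting element lengths read from the file while walking a length-prefixed structure. The sketch below shows the defensive version of such a walk in plain C, using a toy [id][len][payload] layout that only illustrates the idea and is not the EBML format.

#include <stdint.h>
#include <stdio.h>

/* Walk a sequence of [1-byte id][1-byte len][len payload bytes] elements.
 * Every length read from the input is checked against the bytes actually
 * remaining before the cursor advances, so a lying length field can never
 * push the cursor, or a later read, past the end of the buffer. */
static int walk_elements(const uint8_t *buf, size_t size)
{
    size_t pos = 0;
    while (pos < size) {
        if (size - pos < 2)
            return -1;                  /* truncated element header */
        uint8_t id  = buf[pos];
        uint8_t len = buf[pos + 1];
        if (len > size - pos - 2)
            return -1;                  /* payload would run off the end */
        printf("element id=%u len=%u\n", id, len);
        pos += 2u + len;
    }
    return 0;
}

int main(void)
{
    uint8_t good[] = {0x10, 0x02, 0xaa, 0xbb, 0x11, 0x00};
    uint8_t bad[]  = {0x10, 0x7f, 0xaa};   /* claims 127 payload bytes */
    printf("good: %d\n", walk_elements(good, sizeof(good)));
    printf("bad:  %d\n", walk_elements(bad, sizeof(bad)));
    return 0;
}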
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember: respond with just the fixed code and no explanation.
Code: int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a, ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey) { EVP_MD_CTX ctx; const EVP_MD *type = NULL; unsigned char *buf_in=NULL; int ret= -1,inl; int mdnid, pknid; EVP_MD_CTX_init(&ctx); /* Convert signature OID into digest and public key OIDs */ if (type == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM); goto err; } /* Check public key OID matches public key type */ if (EVP_PKEY_type(pknid) != pkey->ameth->pkey_id) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_WRONG_PUBLIC_KEY_TYPE); goto err; } if (!EVP_VerifyInit_ex(&ctx,type, NULL)) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } inl = ASN1_item_i2d(asn, &buf_in, it); if (buf_in == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_MALLOC_FAILURE); goto err; } EVP_VerifyUpdate(&ctx,(unsigned char *)buf_in,inl); OPENSSL_cleanse(buf_in,(unsigned int)inl); OPENSSL_free(buf_in); if (EVP_VerifyFinal(&ctx,(unsigned char *)signature->data, (unsigned int)signature->length,pkey) <= 0) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } /* we don't need to zero the 'ctx' because we just checked * public information */ /* memset(&ctx,0,sizeof(ctx)); */ ret=1; err: EVP_MD_CTX_cleanup(&ctx); return(ret); } Commit Message: CWE ID: CWE-310
int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a, ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey) { EVP_MD_CTX ctx; const EVP_MD *type = NULL; unsigned char *buf_in=NULL; int ret= -1,inl; int mdnid, pknid; if (!pkey) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY, ERR_R_PASSED_NULL_PARAMETER); return -1; } EVP_MD_CTX_init(&ctx); /* Convert signature OID into digest and public key OIDs */ if (type == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM); goto err; } /* Check public key OID matches public key type */ if (EVP_PKEY_type(pknid) != pkey->ameth->pkey_id) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_WRONG_PUBLIC_KEY_TYPE); goto err; } if (!EVP_VerifyInit_ex(&ctx,type, NULL)) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } inl = ASN1_item_i2d(asn, &buf_in, it); if (buf_in == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_MALLOC_FAILURE); goto err; } EVP_VerifyUpdate(&ctx,(unsigned char *)buf_in,inl); OPENSSL_cleanse(buf_in,(unsigned int)inl); OPENSSL_free(buf_in); if (EVP_VerifyFinal(&ctx,(unsigned char *)signature->data, (unsigned int)signature->length,pkey) <= 0) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } /* we don't need to zero the 'ctx' because we just checked * public information */ /* memset(&ctx,0,sizeof(ctx)); */ ret=1; err: EVP_MD_CTX_cleanup(&ctx); return(ret); }
164,793
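The entire fix here is the added NULL check on pkey at the top of the function, which returns an error before pkey->ameth is ever read. The sketch below shows that argument-validation pattern at a public entry point; the key struct and verify() function are placeholders, not OpenSSL API.

#include <errno.h>
#include <stdio.h>

struct key { int id; };

/* Validate pointer arguments up front and fail with a distinct error, rather
 * than relying on every caller to pass a non-NULL key. */
static int verify(const unsigned char *sig, size_t sig_len, const struct key *k)
{
    if (sig == NULL || k == NULL)
        return -EINVAL;               /* reject early, never dereference NULL */
    (void)sig_len;
    /* ... real signature verification over sig would use k->id here ... */
    return k->id == 42 ? 0 : -1;
}

int main(void)
{
    unsigned char sig[4] = {0};
    struct key k = { 42 };
    printf("%d\n", verify(sig, sizeof(sig), &k));    /* 0 */
    printf("%d\n", verify(sig, sizeof(sig), NULL));  /* negative, and no crash */
    return 0;
}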
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember: respond with just the fixed code and no explanation.
Code: jas_seq2d_t *jas_seq2d_copy(jas_seq2d_t *x) { jas_matrix_t *y; int i; int j; y = jas_seq2d_create(jas_seq2d_xstart(x), jas_seq2d_ystart(x), jas_seq2d_xend(x), jas_seq2d_yend(x)); assert(y); for (i = 0; i < x->numrows_; ++i) { for (j = 0; j < x->numcols_; ++j) { *jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j); } } return y; } Commit Message: The generation of the configuration file jas_config.h has been completely reworked in order to avoid pollution of the global namespace. Some problematic types like uchar, ulong, and friends have been replaced with names with a jas_ prefix. An option max_samples has been added to the BMP and JPEG decoders to restrict the maximum size of image that they can decode. This change was made as a (possibly temporary) fix to address security concerns. A max_samples command-line option has also been added to imginfo. Whether an image component (for jas_image_t) is stored in memory or on disk is now based on the component size (rather than the image size). Some debug log message were added. Some new integer overflow checks were added. Some new safe integer add/multiply functions were added. More pre-C99 cruft was removed. JasPer has numerous "hacks" to handle pre-C99 compilers. JasPer now assumes C99 support. So, this pre-C99 cruft is unnecessary and can be removed. The regression jasper-doublefree-mem_close.jpg has been re-enabled. Theoretically, it should work more predictably now. CWE ID: CWE-190
jas_seq2d_t *jas_seq2d_copy(jas_seq2d_t *x) { jas_matrix_t *y; jas_matind_t i; jas_matind_t j; y = jas_seq2d_create(jas_seq2d_xstart(x), jas_seq2d_ystart(x), jas_seq2d_xend(x), jas_seq2d_yend(x)); assert(y); for (i = 0; i < x->numrows_; ++i) { for (j = 0; j < x->numcols_; ++j) { *jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j); } } return y; }
168,708
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) { int ret; int size; if (ud->side == USBIP_STUB) { /* the direction of urb must be OUT. */ if (usb_pipein(urb->pipe)) return 0; size = urb->transfer_buffer_length; } else { /* the direction of urb must be IN. */ if (usb_pipeout(urb->pipe)) return 0; size = urb->actual_length; } /* no need to recv xbuff */ if (!(size > 0)) return 0; ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size); if (ret != size) { dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); if (ud->side == USBIP_STUB) { usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); } else { usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return -EPIPE; } } return ret; } Commit Message: USB: usbip: fix potential out-of-bounds write Fix potential out-of-bounds write to urb->transfer_buffer usbip handles network communication directly in the kernel. When receiving a packet from its peer, usbip code parses headers according to protocol. As part of this parsing urb->actual_length is filled. Since the input for urb->actual_length comes from the network, it should be treated as untrusted. Any entity controlling the network may put any value in the input and the preallocated urb->transfer_buffer may not be large enough to hold the data. Thus, the malicious entity is able to write arbitrary data to kernel memory. Signed-off-by: Ignat Korchagin <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]> CWE ID: CWE-119
int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) { int ret; int size; if (ud->side == USBIP_STUB) { /* the direction of urb must be OUT. */ if (usb_pipein(urb->pipe)) return 0; size = urb->transfer_buffer_length; } else { /* the direction of urb must be IN. */ if (usb_pipeout(urb->pipe)) return 0; size = urb->actual_length; } /* no need to recv xbuff */ if (!(size > 0)) return 0; if (size > urb->transfer_buffer_length) { /* should not happen, probably malicious packet */ if (ud->side == USBIP_STUB) { usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); return 0; } else { usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return -EPIPE; } } ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size); if (ret != size) { dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); if (ud->side == USBIP_STUB) { usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); } else { usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return -EPIPE; } } return ret; }
167,322
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int main(int argc, char ** argv) { int c; unsigned long flags = MS_MANDLOCK; char * orgoptions = NULL; char * share_name = NULL; const char * ipaddr = NULL; char * uuid = NULL; char * mountpoint = NULL; char * options = NULL; char * optionstail; char * resolved_path = NULL; char * temp; char * dev_name; int rc = 0; int rsize = 0; int wsize = 0; int nomtab = 0; int uid = 0; int gid = 0; int optlen = 0; int orgoptlen = 0; size_t options_size = 0; size_t current_len; int retry = 0; /* set when we have to retry mount with uppercase */ struct addrinfo *addrhead = NULL, *addr; struct utsname sysinfo; struct mntent mountent; struct sockaddr_in *addr4; struct sockaddr_in6 *addr6; FILE * pmntfile; /* setlocale(LC_ALL, ""); bindtextdomain(PACKAGE, LOCALEDIR); textdomain(PACKAGE); */ if(argc && argv) thisprogram = argv[0]; else mount_cifs_usage(stderr); if(thisprogram == NULL) thisprogram = "mount.cifs"; uname(&sysinfo); /* BB add workstation name and domain and pass down */ /* #ifdef _GNU_SOURCE fprintf(stderr, " node: %s machine: %s sysname %s domain %s\n", sysinfo.nodename,sysinfo.machine,sysinfo.sysname,sysinfo.domainname); #endif */ if(argc > 2) { dev_name = argv[1]; share_name = strndup(argv[1], MAX_UNC_LEN); if (share_name == NULL) { fprintf(stderr, "%s: %s", argv[0], strerror(ENOMEM)); exit(EX_SYSERR); } mountpoint = argv[2]; } else if (argc == 2) { if ((strcmp(argv[1], "-V") == 0) || (strcmp(argv[1], "--version") == 0)) { print_cifs_mount_version(); exit(0); } if ((strcmp(argv[1], "-h") == 0) || (strcmp(argv[1], "-?") == 0) || (strcmp(argv[1], "--help") == 0)) mount_cifs_usage(stdout); mount_cifs_usage(stderr); } else { mount_cifs_usage(stderr); } /* add sharename in opts string as unc= parm */ while ((c = getopt_long (argc, argv, "afFhilL:no:O:rsSU:vVwt:", longopts, NULL)) != -1) { switch (c) { /* No code to do the following options yet */ /* case 'l': list_with_volumelabel = 1; break; case 'L': volumelabel = optarg; break; */ /* case 'a': ++mount_all; break; */ case '?': case 'h': /* help */ mount_cifs_usage(stdout); case 'n': ++nomtab; break; case 'b': #ifdef MS_BIND flags |= MS_BIND; #else fprintf(stderr, "option 'b' (MS_BIND) not supported\n"); #endif break; case 'm': #ifdef MS_MOVE flags |= MS_MOVE; #else fprintf(stderr, "option 'm' (MS_MOVE) not supported\n"); #endif break; case 'o': orgoptions = strdup(optarg); break; case 'r': /* mount readonly */ flags |= MS_RDONLY; break; case 'U': uuid = optarg; break; case 'v': ++verboseflag; break; case 'V': print_cifs_mount_version(); exit (0); case 'w': flags &= ~MS_RDONLY; break; case 'R': rsize = atoi(optarg) ; break; case 'W': wsize = atoi(optarg); break; case '1': if (isdigit(*optarg)) { char *ep; uid = strtoul(optarg, &ep, 10); if (*ep) { fprintf(stderr, "bad uid value \"%s\"\n", optarg); exit(EX_USAGE); } } else { struct passwd *pw; if (!(pw = getpwnam(optarg))) { fprintf(stderr, "bad user name \"%s\"\n", optarg); exit(EX_USAGE); } uid = pw->pw_uid; endpwent(); } break; case '2': if (isdigit(*optarg)) { char *ep; gid = strtoul(optarg, &ep, 10); if (*ep) { fprintf(stderr, "bad gid value \"%s\"\n", optarg); exit(EX_USAGE); } } else { struct group *gr; if (!(gr = getgrnam(optarg))) { fprintf(stderr, "bad user name \"%s\"\n", optarg); exit(EX_USAGE); } gid = gr->gr_gid; endpwent(); } break; case 'u': got_user = 1; user_name = optarg; break; case 'd': domain_name = optarg; /* BB fix this - currently ignored */ got_domain = 1; break; case 'p': if(mountpassword == NULL) mountpassword = (char *)calloc(MOUNT_PASSWD_SIZE+1,1); 
if(mountpassword) { got_password = 1; strlcpy(mountpassword,optarg,MOUNT_PASSWD_SIZE+1); } break; case 'S': get_password_from_file(0 /* stdin */,NULL); break; case 't': break; case 'f': ++fakemnt; break; default: fprintf(stderr, "unknown mount option %c\n",c); mount_cifs_usage(stderr); } } if((argc < 3) || (dev_name == NULL) || (mountpoint == NULL)) { mount_cifs_usage(stderr); } /* make sure mountpoint is legit */ rc = check_mountpoint(thisprogram, mountpoint); if (rc) goto mount_exit; /* enable any default user mount flags */ flags |= CIFS_SETUID_FLAGS; } Commit Message: CWE ID: CWE-59
int main(int argc, char ** argv) { int c; unsigned long flags = MS_MANDLOCK; char * orgoptions = NULL; char * share_name = NULL; const char * ipaddr = NULL; char * uuid = NULL; char * mountpoint = NULL; char * options = NULL; char * optionstail; char * resolved_path = NULL; char * temp; char * dev_name; int rc = 0; int rsize = 0; int wsize = 0; int nomtab = 0; int uid = 0; int gid = 0; int optlen = 0; int orgoptlen = 0; size_t options_size = 0; size_t current_len; int retry = 0; /* set when we have to retry mount with uppercase */ struct addrinfo *addrhead = NULL, *addr; struct utsname sysinfo; struct mntent mountent; struct sockaddr_in *addr4; struct sockaddr_in6 *addr6; FILE * pmntfile; /* setlocale(LC_ALL, ""); bindtextdomain(PACKAGE, LOCALEDIR); textdomain(PACKAGE); */ if(argc && argv) thisprogram = argv[0]; else mount_cifs_usage(stderr); if(thisprogram == NULL) thisprogram = "mount.cifs"; uname(&sysinfo); /* BB add workstation name and domain and pass down */ /* #ifdef _GNU_SOURCE fprintf(stderr, " node: %s machine: %s sysname %s domain %s\n", sysinfo.nodename,sysinfo.machine,sysinfo.sysname,sysinfo.domainname); #endif */ if(argc > 2) { dev_name = argv[1]; share_name = strndup(argv[1], MAX_UNC_LEN); if (share_name == NULL) { fprintf(stderr, "%s: %s", argv[0], strerror(ENOMEM)); exit(EX_SYSERR); } mountpoint = argv[2]; } else if (argc == 2) { if ((strcmp(argv[1], "-V") == 0) || (strcmp(argv[1], "--version") == 0)) { print_cifs_mount_version(); exit(0); } if ((strcmp(argv[1], "-h") == 0) || (strcmp(argv[1], "-?") == 0) || (strcmp(argv[1], "--help") == 0)) mount_cifs_usage(stdout); mount_cifs_usage(stderr); } else { mount_cifs_usage(stderr); } /* add sharename in opts string as unc= parm */ while ((c = getopt_long (argc, argv, "afFhilL:no:O:rsSU:vVwt:", longopts, NULL)) != -1) { switch (c) { /* No code to do the following options yet */ /* case 'l': list_with_volumelabel = 1; break; case 'L': volumelabel = optarg; break; */ /* case 'a': ++mount_all; break; */ case '?': case 'h': /* help */ mount_cifs_usage(stdout); case 'n': ++nomtab; break; case 'b': #ifdef MS_BIND flags |= MS_BIND; #else fprintf(stderr, "option 'b' (MS_BIND) not supported\n"); #endif break; case 'm': #ifdef MS_MOVE flags |= MS_MOVE; #else fprintf(stderr, "option 'm' (MS_MOVE) not supported\n"); #endif break; case 'o': orgoptions = strdup(optarg); break; case 'r': /* mount readonly */ flags |= MS_RDONLY; break; case 'U': uuid = optarg; break; case 'v': ++verboseflag; break; case 'V': print_cifs_mount_version(); exit (0); case 'w': flags &= ~MS_RDONLY; break; case 'R': rsize = atoi(optarg) ; break; case 'W': wsize = atoi(optarg); break; case '1': if (isdigit(*optarg)) { char *ep; uid = strtoul(optarg, &ep, 10); if (*ep) { fprintf(stderr, "bad uid value \"%s\"\n", optarg); exit(EX_USAGE); } } else { struct passwd *pw; if (!(pw = getpwnam(optarg))) { fprintf(stderr, "bad user name \"%s\"\n", optarg); exit(EX_USAGE); } uid = pw->pw_uid; endpwent(); } break; case '2': if (isdigit(*optarg)) { char *ep; gid = strtoul(optarg, &ep, 10); if (*ep) { fprintf(stderr, "bad gid value \"%s\"\n", optarg); exit(EX_USAGE); } } else { struct group *gr; if (!(gr = getgrnam(optarg))) { fprintf(stderr, "bad user name \"%s\"\n", optarg); exit(EX_USAGE); } gid = gr->gr_gid; endpwent(); } break; case 'u': got_user = 1; user_name = optarg; break; case 'd': domain_name = optarg; /* BB fix this - currently ignored */ got_domain = 1; break; case 'p': if(mountpassword == NULL) mountpassword = (char *)calloc(MOUNT_PASSWD_SIZE+1,1); if(mountpassword) { 
got_password = 1; strlcpy(mountpassword,optarg,MOUNT_PASSWD_SIZE+1); } break; case 'S': get_password_from_file(0 /* stdin */,NULL); break; case 't': break; case 'f': ++fakemnt; break; default: fprintf(stderr, "unknown mount option %c\n",c); mount_cifs_usage(stderr); } } if((argc < 3) || (dev_name == NULL) || (mountpoint == NULL)) { mount_cifs_usage(stderr); } /* make sure mountpoint is legit */ rc = chdir(mountpoint); if (rc) { fprintf(stderr, "Couldn't chdir to %s: %s\n", mountpoint, strerror(errno)); rc = EX_USAGE; goto mount_exit; } rc = check_mountpoint(thisprogram, mountpoint); if (rc) goto mount_exit; /* enable any default user mount flags */ flags |= CIFS_SETUID_FLAGS; }
165,169
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static inline void process_get_command(conn *c, token_t *tokens, size_t ntokens, bool return_cas) { char *key; size_t nkey; int i = 0; item *it; token_t *key_token = &tokens[KEY_TOKEN]; char *suffix; assert(c != NULL); do { while(key_token->length != 0) { key = key_token->value; nkey = key_token->length; if(nkey > KEY_MAX_LENGTH) { out_string(c, "CLIENT_ERROR bad command line format"); while (i-- > 0) { item_remove(*(c->ilist + i)); } return; } it = item_get(key, nkey, c, DO_UPDATE); if (settings.detail_enabled) { stats_prefix_record_get(key, nkey, NULL != it); } if (it) { if (i >= c->isize) { item **new_list = realloc(c->ilist, sizeof(item *) * c->isize * 2); if (new_list) { c->isize *= 2; c->ilist = new_list; } else { STATS_LOCK(); stats.malloc_fails++; STATS_UNLOCK(); item_remove(it); break; } } /* * Construct the response. Each hit adds three elements to the * outgoing data list: * "VALUE " * key * " " + flags + " " + data length + "\r\n" + data (with \r\n) */ if (return_cas || !settings.inline_ascii_response) { MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey, it->nbytes, ITEM_get_cas(it)); /* Goofy mid-flight realloc. */ if (i >= c->suffixsize) { char **new_suffix_list = realloc(c->suffixlist, sizeof(char *) * c->suffixsize * 2); if (new_suffix_list) { c->suffixsize *= 2; c->suffixlist = new_suffix_list; } else { STATS_LOCK(); stats.malloc_fails++; STATS_UNLOCK(); item_remove(it); break; } } suffix = do_cache_alloc(c->thread->suffix_cache); if (suffix == NULL) { STATS_LOCK(); stats.malloc_fails++; STATS_UNLOCK(); out_of_memory(c, "SERVER_ERROR out of memory making CAS suffix"); item_remove(it); while (i-- > 0) { item_remove(*(c->ilist + i)); } return; } *(c->suffixlist + i) = suffix; int suffix_len = make_ascii_get_suffix(suffix, it, return_cas); if (add_iov(c, "VALUE ", 6) != 0 || add_iov(c, ITEM_key(it), it->nkey) != 0 || (settings.inline_ascii_response && add_iov(c, ITEM_suffix(it), it->nsuffix - 2) != 0) || add_iov(c, suffix, suffix_len) != 0) { item_remove(it); break; } if ((it->it_flags & ITEM_CHUNKED) == 0) { add_iov(c, ITEM_data(it), it->nbytes); } else if (add_chunked_item_iovs(c, it, it->nbytes) != 0) { item_remove(it); break; } } else { MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey, it->nbytes, ITEM_get_cas(it)); if (add_iov(c, "VALUE ", 6) != 0 || add_iov(c, ITEM_key(it), it->nkey) != 0) { item_remove(it); break; } if ((it->it_flags & ITEM_CHUNKED) == 0) { if (add_iov(c, ITEM_suffix(it), it->nsuffix + it->nbytes) != 0) { item_remove(it); break; } } else if (add_iov(c, ITEM_suffix(it), it->nsuffix) != 0 || add_chunked_item_iovs(c, it, it->nbytes) != 0) { item_remove(it); break; } } if (settings.verbose > 1) { int ii; fprintf(stderr, ">%d sending key ", c->sfd); for (ii = 0; ii < it->nkey; ++ii) { fprintf(stderr, "%c", key[ii]); } fprintf(stderr, "\n"); } /* item_get() has incremented it->refcount for us */ pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.slab_stats[ITEM_clsid(it)].get_hits++; c->thread->stats.get_cmds++; pthread_mutex_unlock(&c->thread->stats.mutex); *(c->ilist + i) = it; i++; } else { pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.get_misses++; c->thread->stats.get_cmds++; pthread_mutex_unlock(&c->thread->stats.mutex); MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0); } key_token++; } /* * If the command string hasn't been fully processed, get the next set * of tokens. 
*/ if(key_token->value != NULL) { ntokens = tokenize_command(key_token->value, tokens, MAX_TOKENS); key_token = tokens; } } while(key_token->value != NULL); c->icurr = c->ilist; c->ileft = i; if (return_cas || !settings.inline_ascii_response) { c->suffixcurr = c->suffixlist; c->suffixleft = i; } if (settings.verbose > 1) fprintf(stderr, ">%d END\n", c->sfd); /* If the loop was terminated because of out-of-memory, it is not reliable to add END\r\n to the buffer, because it might not end in \r\n. So we send SERVER_ERROR instead. */ if (key_token->value != NULL || add_iov(c, "END\r\n", 5) != 0 || (IS_UDP(c->transport) && build_udp_headers(c) != 0)) { out_of_memory(c, "SERVER_ERROR out of memory writing get response"); } else { conn_set_state(c, conn_mwrite); c->msgcurr = 0; } } Commit Message: Don't overflow item refcount on get Counts as a miss if the refcount is too high. ASCII multigets are the only time refcounts can be held for so long. doing a dirty read of refcount. is aligned. trying to avoid adding an extra refcount branch for all calls of item_get due to performance. might be able to move it in there after logging refactoring simplifies some of the branches. CWE ID: CWE-190
static inline void process_get_command(conn *c, token_t *tokens, size_t ntokens, bool return_cas) { char *key; size_t nkey; int i = 0; item *it; token_t *key_token = &tokens[KEY_TOKEN]; char *suffix; assert(c != NULL); do { while(key_token->length != 0) { key = key_token->value; nkey = key_token->length; if(nkey > KEY_MAX_LENGTH) { out_string(c, "CLIENT_ERROR bad command line format"); while (i-- > 0) { item_remove(*(c->ilist + i)); } return; } it = limited_get(key, nkey, c); if (settings.detail_enabled) { stats_prefix_record_get(key, nkey, NULL != it); } if (it) { if (i >= c->isize) { item **new_list = realloc(c->ilist, sizeof(item *) * c->isize * 2); if (new_list) { c->isize *= 2; c->ilist = new_list; } else { STATS_LOCK(); stats.malloc_fails++; STATS_UNLOCK(); item_remove(it); break; } } /* * Construct the response. Each hit adds three elements to the * outgoing data list: * "VALUE " * key * " " + flags + " " + data length + "\r\n" + data (with \r\n) */ if (return_cas || !settings.inline_ascii_response) { MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey, it->nbytes, ITEM_get_cas(it)); /* Goofy mid-flight realloc. */ if (i >= c->suffixsize) { char **new_suffix_list = realloc(c->suffixlist, sizeof(char *) * c->suffixsize * 2); if (new_suffix_list) { c->suffixsize *= 2; c->suffixlist = new_suffix_list; } else { STATS_LOCK(); stats.malloc_fails++; STATS_UNLOCK(); item_remove(it); break; } } suffix = do_cache_alloc(c->thread->suffix_cache); if (suffix == NULL) { STATS_LOCK(); stats.malloc_fails++; STATS_UNLOCK(); out_of_memory(c, "SERVER_ERROR out of memory making CAS suffix"); item_remove(it); while (i-- > 0) { item_remove(*(c->ilist + i)); } return; } *(c->suffixlist + i) = suffix; int suffix_len = make_ascii_get_suffix(suffix, it, return_cas); if (add_iov(c, "VALUE ", 6) != 0 || add_iov(c, ITEM_key(it), it->nkey) != 0 || (settings.inline_ascii_response && add_iov(c, ITEM_suffix(it), it->nsuffix - 2) != 0) || add_iov(c, suffix, suffix_len) != 0) { item_remove(it); break; } if ((it->it_flags & ITEM_CHUNKED) == 0) { add_iov(c, ITEM_data(it), it->nbytes); } else if (add_chunked_item_iovs(c, it, it->nbytes) != 0) { item_remove(it); break; } } else { MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey, it->nbytes, ITEM_get_cas(it)); if (add_iov(c, "VALUE ", 6) != 0 || add_iov(c, ITEM_key(it), it->nkey) != 0) { item_remove(it); break; } if ((it->it_flags & ITEM_CHUNKED) == 0) { if (add_iov(c, ITEM_suffix(it), it->nsuffix + it->nbytes) != 0) { item_remove(it); break; } } else if (add_iov(c, ITEM_suffix(it), it->nsuffix) != 0 || add_chunked_item_iovs(c, it, it->nbytes) != 0) { item_remove(it); break; } } if (settings.verbose > 1) { int ii; fprintf(stderr, ">%d sending key ", c->sfd); for (ii = 0; ii < it->nkey; ++ii) { fprintf(stderr, "%c", key[ii]); } fprintf(stderr, "\n"); } /* item_get() has incremented it->refcount for us */ pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.slab_stats[ITEM_clsid(it)].get_hits++; c->thread->stats.get_cmds++; pthread_mutex_unlock(&c->thread->stats.mutex); *(c->ilist + i) = it; i++; } else { pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.get_misses++; c->thread->stats.get_cmds++; pthread_mutex_unlock(&c->thread->stats.mutex); MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0); } key_token++; } /* * If the command string hasn't been fully processed, get the next set * of tokens. 
*/ if(key_token->value != NULL) { ntokens = tokenize_command(key_token->value, tokens, MAX_TOKENS); key_token = tokens; } } while(key_token->value != NULL); c->icurr = c->ilist; c->ileft = i; if (return_cas || !settings.inline_ascii_response) { c->suffixcurr = c->suffixlist; c->suffixleft = i; } if (settings.verbose > 1) fprintf(stderr, ">%d END\n", c->sfd); /* If the loop was terminated because of out-of-memory, it is not reliable to add END\r\n to the buffer, because it might not end in \r\n. So we send SERVER_ERROR instead. */ if (key_token->value != NULL || add_iov(c, "END\r\n", 5) != 0 || (IS_UDP(c->transport) && build_udp_headers(c) != 0)) { out_of_memory(c, "SERVER_ERROR out of memory writing get response"); } else { conn_set_state(c, conn_mwrite); c->msgcurr = 0; } }
168,943
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static inline int object_common2(UNSERIALIZE_PARAMETER, long elements) { zval *retval_ptr = NULL; zval fname; if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_OBJPROP_PP(rval), elements, 1)) { /* We've got partially constructed object on our hands here. Wipe it. */ if(Z_TYPE_PP(rval) == IS_OBJECT) { zend_hash_clean(Z_OBJPROP_PP(rval)); } ZVAL_NULL(*rval); return 0; } if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (Z_OBJCE_PP(rval) != PHP_IC_ENTRY && zend_hash_exists(&Z_OBJCE_PP(rval)->function_table, "__wakeup", sizeof("__wakeup"))) { INIT_PZVAL(&fname); ZVAL_STRINGL(&fname, "__wakeup", sizeof("__wakeup") - 1, 0); BG(serialize_lock)++; call_user_function_ex(CG(function_table), rval, &fname, &retval_ptr, 0, 0, 1, NULL TSRMLS_CC); BG(serialize_lock)--; } if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); } Commit Message: Fix bug #73052 - Memory Corruption in During Deserialized-object Destruction CWE ID: CWE-119
static inline int object_common2(UNSERIALIZE_PARAMETER, long elements) { zval *retval_ptr = NULL; zval fname; if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_OBJPROP_PP(rval), elements, 1)) { /* We've got partially constructed object on our hands here. Wipe it. */ if(Z_TYPE_PP(rval) == IS_OBJECT) { zend_hash_clean(Z_OBJPROP_PP(rval)); zend_object_store_ctor_failed(*rval TSRMLS_CC); } ZVAL_NULL(*rval); return 0; } if (Z_TYPE_PP(rval) != IS_OBJECT) { return 0; } if (Z_OBJCE_PP(rval) != PHP_IC_ENTRY && zend_hash_exists(&Z_OBJCE_PP(rval)->function_table, "__wakeup", sizeof("__wakeup"))) { INIT_PZVAL(&fname); ZVAL_STRINGL(&fname, "__wakeup", sizeof("__wakeup") - 1, 0); BG(serialize_lock)++; call_user_function_ex(CG(function_table), rval, &fname, &retval_ptr, 0, 0, 1, NULL TSRMLS_CC); BG(serialize_lock)--; } if (retval_ptr) { zval_ptr_dtor(&retval_ptr); } if (EG(exception)) { return 0; } return finish_nested_data(UNSERIALIZE_PASSTHRU); }
166,940
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int http_connect(http_subtransport *t) { int error; char *proxy_url; if (t->connected && http_should_keep_alive(&t->parser) && t->parse_finished) return 0; if (t->io) { git_stream_close(t->io); git_stream_free(t->io); t->io = NULL; t->connected = 0; } if (t->connection_data.use_ssl) { error = git_tls_stream_new(&t->io, t->connection_data.host, t->connection_data.port); } else { #ifdef GIT_CURL error = git_curl_stream_new(&t->io, t->connection_data.host, t->connection_data.port); #else error = git_socket_stream_new(&t->io, t->connection_data.host, t->connection_data.port); #endif } if (error < 0) return error; GITERR_CHECK_VERSION(t->io, GIT_STREAM_VERSION, "git_stream"); if (git_stream_supports_proxy(t->io) && !git_remote__get_http_proxy(t->owner->owner, !!t->connection_data.use_ssl, &proxy_url)) { error = git_stream_set_proxy(t->io, proxy_url); git__free(proxy_url); if (error < 0) return error; } error = git_stream_connect(t->io); #if defined(GIT_OPENSSL) || defined(GIT_SECURE_TRANSPORT) || defined(GIT_CURL) if ((!error || error == GIT_ECERTIFICATE) && t->owner->certificate_check_cb != NULL && git_stream_is_encrypted(t->io)) { git_cert *cert; int is_valid; if ((error = git_stream_certificate(&cert, t->io)) < 0) return error; giterr_clear(); is_valid = error != GIT_ECERTIFICATE; error = t->owner->certificate_check_cb(cert, is_valid, t->connection_data.host, t->owner->message_cb_payload); if (error < 0) { if (!giterr_last()) giterr_set(GITERR_NET, "user cancelled certificate check"); return error; } } #endif if (error < 0) return error; t->connected = 1; return 0; } Commit Message: http: check certificate validity before clobbering the error variable CWE ID: CWE-284
static int http_connect(http_subtransport *t) { int error; char *proxy_url; if (t->connected && http_should_keep_alive(&t->parser) && t->parse_finished) return 0; if (t->io) { git_stream_close(t->io); git_stream_free(t->io); t->io = NULL; t->connected = 0; } if (t->connection_data.use_ssl) { error = git_tls_stream_new(&t->io, t->connection_data.host, t->connection_data.port); } else { #ifdef GIT_CURL error = git_curl_stream_new(&t->io, t->connection_data.host, t->connection_data.port); #else error = git_socket_stream_new(&t->io, t->connection_data.host, t->connection_data.port); #endif } if (error < 0) return error; GITERR_CHECK_VERSION(t->io, GIT_STREAM_VERSION, "git_stream"); if (git_stream_supports_proxy(t->io) && !git_remote__get_http_proxy(t->owner->owner, !!t->connection_data.use_ssl, &proxy_url)) { error = git_stream_set_proxy(t->io, proxy_url); git__free(proxy_url); if (error < 0) return error; } error = git_stream_connect(t->io); #if defined(GIT_OPENSSL) || defined(GIT_SECURE_TRANSPORT) || defined(GIT_CURL) if ((!error || error == GIT_ECERTIFICATE) && t->owner->certificate_check_cb != NULL && git_stream_is_encrypted(t->io)) { git_cert *cert; int is_valid = (error == GIT_OK); if ((error = git_stream_certificate(&cert, t->io)) < 0) return error; giterr_clear(); error = t->owner->certificate_check_cb(cert, is_valid, t->connection_data.host, t->owner->message_cb_payload); if (error < 0) { if (!giterr_last()) giterr_set(GITERR_NET, "user cancelled certificate check"); return error; } } #endif if (error < 0) return error; t->connected = 1; return 0; }
170,109
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static ssize_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate) { Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; if (next_image->compression == RLECompression) { compact_pixels=AcquireCompactPixels(image); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if (next_image->storage_class != PseudoClass) { if (IsGrayImage(next_image,&next_image->exception) == MagickFalse) channels=next_image->colorspace == CMYKColorspace ? 4 : 3; if (next_image->matte != MagickFalse) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if (next_image->storage_class == PseudoClass) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsGrayImage(next_image,&next_image->exception) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->matte != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); if (mask != (Image *) NULL) { if (mask->compression == RLECompression) { 
compact_pixels=AcquireCompactPixels(mask); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } Commit Message: https://github.com/ImageMagick/ImageMagick/issues/350 CWE ID: CWE-787
static ssize_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate) { Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; if (next_image->compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if (next_image->storage_class != PseudoClass) { if (IsGrayImage(next_image,&next_image->exception) == MagickFalse) channels=next_image->colorspace == CMYKColorspace ? 4 : 3; if (next_image->matte != MagickFalse) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if (next_image->storage_class == PseudoClass) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsGrayImage(next_image,&next_image->exception) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->matte != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); if (mask != (Image *) NULL) { if (mask->compression == RLECompression) { 
compact_pixels=AcquireCompactPixels(mask); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); }
170,103
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int key_notify_policy_flush(const struct km_event *c) { struct sk_buff *skb_out; struct sadb_msg *hdr; skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC); if (!skb_out) return -ENOBUFS; hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg)); hdr->sadb_msg_type = SADB_X_SPDFLUSH; hdr->sadb_msg_seq = c->seq; hdr->sadb_msg_pid = c->portid; hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; } Commit Message: af_key: fix info leaks in notify messages key_notify_sa_flush() and key_notify_policy_flush() miss to initialize the sadb_msg_reserved member of the broadcasted message and thereby leak 2 bytes of heap memory to listeners. Fix that. Signed-off-by: Mathias Krause <[email protected]> Cc: Steffen Klassert <[email protected]> Cc: "David S. Miller" <[email protected]> Cc: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-119
static int key_notify_policy_flush(const struct km_event *c) { struct sk_buff *skb_out; struct sadb_msg *hdr; skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC); if (!skb_out) return -ENOBUFS; hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg)); hdr->sadb_msg_type = SADB_X_SPDFLUSH; hdr->sadb_msg_seq = c->seq; hdr->sadb_msg_pid = c->portid; hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); hdr->sadb_msg_reserved = 0; pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; }
166,074
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ProcessRequest() { DCHECK_CURRENTLY_ON(BrowserThread::UI); timer.Stop(); // Erase reference to self. WallpaperManager* manager = WallpaperManager::Get(); if (manager->pending_inactive_ == this) manager->pending_inactive_ = NULL; started_load_at_ = base::Time::Now(); if (default_) { manager->DoSetDefaultWallpaper(account_id_, std::move(on_finish_)); } else if (!user_wallpaper_.isNull()) { SetWallpaper(user_wallpaper_, info_); } else if (!wallpaper_path_.empty()) { manager->task_runner_->PostTask( FROM_HERE, base::BindOnce(&WallpaperManager::GetCustomWallpaperInternal, account_id_, info_, wallpaper_path_, true /* update wallpaper */, base::ThreadTaskRunnerHandle::Get(), base::Passed(std::move(on_finish_)), manager->weak_factory_.GetWeakPtr())); } else if (!info_.location.empty()) { manager->LoadWallpaper(account_id_, info_, true, std::move(on_finish_)); } else { NOTREACHED(); started_load_at_ = base::Time(); } on_finish_.reset(); } Commit Message: [reland] Do not set default wallpaper unless it should do so. [email protected], [email protected] Bug: 751382 Change-Id: Id0793dfe467f737526a95b1e66ed01fbb8860bda Reviewed-on: https://chromium-review.googlesource.com/619754 Commit-Queue: Xiaoqian Dai <[email protected]> Reviewed-by: Alexander Alekseev <[email protected]> Reviewed-by: Biao She <[email protected]> Cr-Original-Commit-Position: refs/heads/master@{#498325} Reviewed-on: https://chromium-review.googlesource.com/646430 Cr-Commit-Position: refs/heads/master@{#498982} CWE ID: CWE-200
void ProcessRequest() { DCHECK_CURRENTLY_ON(BrowserThread::UI); timer.Stop(); // Erase reference to self. WallpaperManager* manager = WallpaperManager::Get(); if (manager->pending_inactive_ == this) manager->pending_inactive_ = NULL; started_load_at_ = base::Time::Now(); if (default_) { manager->DoSetDefaultWallpaper(account_id_, true /* update_wallpaper */, std::move(on_finish_)); } else if (!user_wallpaper_.isNull()) { SetWallpaper(user_wallpaper_, info_); } else if (!wallpaper_path_.empty()) { manager->task_runner_->PostTask( FROM_HERE, base::BindOnce(&WallpaperManager::GetCustomWallpaperInternal, account_id_, info_, wallpaper_path_, true /* update wallpaper */, base::ThreadTaskRunnerHandle::Get(), base::Passed(std::move(on_finish_)), manager->weak_factory_.GetWeakPtr())); } else if (!info_.location.empty()) { manager->LoadWallpaper(account_id_, info_, true /* update_wallpaper */, std::move(on_finish_)); } else { NOTREACHED(); started_load_at_ = base::Time(); } on_finish_.reset(); }
171,969
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: UsbChooserContext::UsbChooserContext(Profile* profile) : ChooserContextBase(profile, CONTENT_SETTINGS_TYPE_USB_GUARD, CONTENT_SETTINGS_TYPE_USB_CHOOSER_DATA), is_incognito_(profile->IsOffTheRecord()), client_binding_(this), weak_factory_(this) {} Commit Message: Enforce the WebUsbAllowDevicesForUrls policy This change modifies UsbChooserContext to use the UsbAllowDevicesForUrls class to consider devices allowed by the WebUsbAllowDevicesForUrls policy. The WebUsbAllowDevicesForUrls policy overrides the other WebUSB policies. Unit tests are also added to ensure that the policy is being enforced correctly. The design document for this feature is found at: https://docs.google.com/document/d/1MPvsrWiVD_jAC8ELyk8njFpy6j1thfVU5aWT3TCWE8w Bug: 854329 Change-Id: I5f82e662ca9dc544da5918eae766b5535a31296b Reviewed-on: https://chromium-review.googlesource.com/c/1259289 Commit-Queue: Ovidio Henriquez <[email protected]> Reviewed-by: Reilly Grant <[email protected]> Reviewed-by: Julian Pastarmov <[email protected]> Cr-Commit-Position: refs/heads/master@{#597926} CWE ID: CWE-119
UsbChooserContext::UsbChooserContext(Profile* profile) : ChooserContextBase(profile, CONTENT_SETTINGS_TYPE_USB_GUARD, CONTENT_SETTINGS_TYPE_USB_CHOOSER_DATA), is_incognito_(profile->IsOffTheRecord()), client_binding_(this),
173,336
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct hci_ufilter uf; struct sock *sk = sock->sk; int len, opt, err = 0; BT_DBG("sk %p, opt %d", sk, optname); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { err = -EINVAL; goto done; } switch (optname) { case HCI_DATA_DIR: if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR) opt = 1; else opt = 0; if (put_user(opt, optval)) err = -EFAULT; break; case HCI_TIME_STAMP: if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP) opt = 1; else opt = 0; if (put_user(opt, optval)) err = -EFAULT; break; case HCI_FILTER: { struct hci_filter *f = &hci_pi(sk)->filter; uf.type_mask = f->type_mask; uf.opcode = f->opcode; uf.event_mask[0] = *((u32 *) f->event_mask + 0); uf.event_mask[1] = *((u32 *) f->event_mask + 1); } len = min_t(unsigned int, len, sizeof(uf)); if (copy_to_user(optval, &uf, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } done: release_sock(sk); return err; } Commit Message: Bluetooth: HCI - Fix info leak in getsockopt(HCI_FILTER) The HCI code fails to initialize the two padding bytes of struct hci_ufilter before copying it to userland -- that for leaking two bytes kernel stack. Add an explicit memset(0) before filling the structure to avoid the info leak. Signed-off-by: Mathias Krause <[email protected]> Cc: Marcel Holtmann <[email protected]> Cc: Gustavo Padovan <[email protected]> Cc: Johan Hedberg <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-200
static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct hci_ufilter uf; struct sock *sk = sock->sk; int len, opt, err = 0; BT_DBG("sk %p, opt %d", sk, optname); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { err = -EINVAL; goto done; } switch (optname) { case HCI_DATA_DIR: if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR) opt = 1; else opt = 0; if (put_user(opt, optval)) err = -EFAULT; break; case HCI_TIME_STAMP: if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP) opt = 1; else opt = 0; if (put_user(opt, optval)) err = -EFAULT; break; case HCI_FILTER: { struct hci_filter *f = &hci_pi(sk)->filter; memset(&uf, 0, sizeof(uf)); uf.type_mask = f->type_mask; uf.opcode = f->opcode; uf.event_mask[0] = *((u32 *) f->event_mask + 0); uf.event_mask[1] = *((u32 *) f->event_mask + 1); } len = min_t(unsigned int, len, sizeof(uf)); if (copy_to_user(optval, &uf, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } done: release_sock(sk); return err; }
166,182
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int SocketStream::DoBeforeConnect() { next_state_ = STATE_BEFORE_CONNECT_COMPLETE; if (!context_.get() || !context_->network_delegate()) { return OK; } int result = context_->network_delegate()->NotifyBeforeSocketStreamConnect( this, io_callback_); if (result != OK && result != ERR_IO_PENDING) next_state_ = STATE_CLOSE; return result; } Commit Message: Revert a workaround commit for a Use-After-Free crash. Revert a workaround commit r20158 for a Use-After-Free issue (http://crbug.com/244746) because a cleaner CL r207218 is landed. URLRequestContext does not inherit SupportsWeakPtr now. R=mmenke BUG=244746 Review URL: https://chromiumcodereview.appspot.com/16870008 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@207811 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
int SocketStream::DoBeforeConnect() { next_state_ = STATE_BEFORE_CONNECT_COMPLETE; if (!context_ || !context_->network_delegate()) return OK; int result = context_->network_delegate()->NotifyBeforeSocketStreamConnect( this, io_callback_); if (result != OK && result != ERR_IO_PENDING) next_state_ = STATE_CLOSE; return result; }
171,253
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static zend_bool add_post_var(zval *arr, post_var_data_t *var, zend_bool eof) { char *ksep, *vsep, *val; size_t klen, vlen; size_t new_vlen; if (var->ptr >= var->end) { return 0; } vsep = memchr(var->ptr, '&', var->end - var->ptr); if (!vsep) { if (!eof) { return 0; } else { vsep = var->end; } } ksep = memchr(var->ptr, '=', vsep - var->ptr); if (ksep) { *ksep = '\0'; /* "foo=bar&" or "foo=&" */ klen = ksep - var->ptr; vlen = vsep - ++ksep; } else { ksep = ""; /* "foo&" */ klen = vsep - var->ptr; vlen = 0; } php_url_decode(var->ptr, klen); val = estrndup(ksep, vlen); if (vlen) { vlen = php_url_decode(val, vlen); } if (sapi_module.input_filter(PARSE_POST, var->ptr, &val, vlen, &new_vlen)) { php_register_variable_safe(var->ptr, val, new_vlen, arr); } efree(val); var->ptr = vsep + (vsep != var->end); return 1; } Commit Message: Fix bug #73807 CWE ID: CWE-400
static zend_bool add_post_var(zval *arr, post_var_data_t *var, zend_bool eof) { char *start, *ksep, *vsep, *val; size_t klen, vlen; size_t new_vlen; if (var->ptr >= var->end) { return 0; } start = var->ptr + var->already_scanned; vsep = memchr(start, '&', var->end - start); if (!vsep) { if (!eof) { var->already_scanned = var->end - var->ptr; return 0; } else { vsep = var->end; } } ksep = memchr(var->ptr, '=', vsep - var->ptr); if (ksep) { *ksep = '\0'; /* "foo=bar&" or "foo=&" */ klen = ksep - var->ptr; vlen = vsep - ++ksep; } else { ksep = ""; /* "foo&" */ klen = vsep - var->ptr; vlen = 0; } php_url_decode(var->ptr, klen); val = estrndup(ksep, vlen); if (vlen) { vlen = php_url_decode(val, vlen); } if (sapi_module.input_filter(PARSE_POST, var->ptr, &val, vlen, &new_vlen)) { php_register_variable_safe(var->ptr, val, new_vlen, arr); } efree(val); var->ptr = vsep + (vsep != var->end); var->already_scanned = 0; return 1; }
170,041
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int get_debug_info(struct PE_(r_bin_pe_obj_t)* bin, PE_(image_debug_directory_entry)* dbg_dir_entry, ut8* dbg_data, int dbg_data_len, SDebugInfo* res) { #define SIZEOF_FILE_NAME 255 int i = 0; const char* basename; if (!dbg_data) { return 0; } switch (dbg_dir_entry->Type) { case IMAGE_DEBUG_TYPE_CODEVIEW: if (!strncmp ((char*) dbg_data, "RSDS", 4)) { SCV_RSDS_HEADER rsds_hdr; init_rsdr_hdr (&rsds_hdr); if (!get_rsds (dbg_data, dbg_data_len, &rsds_hdr)) { bprintf ("Warning: Cannot read PE debug info\n"); return 0; } snprintf (res->guidstr, GUIDSTR_LEN, "%08x%04x%04x%02x%02x%02x%02x%02x%02x%02x%02x%x", rsds_hdr.guid.data1, rsds_hdr.guid.data2, rsds_hdr.guid.data3, rsds_hdr.guid.data4[0], rsds_hdr.guid.data4[1], rsds_hdr.guid.data4[2], rsds_hdr.guid.data4[3], rsds_hdr.guid.data4[4], rsds_hdr.guid.data4[5], rsds_hdr.guid.data4[6], rsds_hdr.guid.data4[7], rsds_hdr.age); basename = r_file_basename ((char*) rsds_hdr.file_name); strncpy (res->file_name, (const char*) basename, sizeof (res->file_name)); res->file_name[sizeof (res->file_name) - 1] = 0; rsds_hdr.free ((struct SCV_RSDS_HEADER*) &rsds_hdr); } else if (strncmp ((const char*) dbg_data, "NB10", 4) == 0) { SCV_NB10_HEADER nb10_hdr; init_cv_nb10_header (&nb10_hdr); get_nb10 (dbg_data, &nb10_hdr); snprintf (res->guidstr, sizeof (res->guidstr), "%x%x", nb10_hdr.timestamp, nb10_hdr.age); strncpy (res->file_name, (const char*) nb10_hdr.file_name, sizeof(res->file_name) - 1); res->file_name[sizeof (res->file_name) - 1] = 0; nb10_hdr.free ((struct SCV_NB10_HEADER*) &nb10_hdr); } else { bprintf ("CodeView section not NB10 or RSDS\n"); return 0; } break; default: return 0; } while (i < 33) { res->guidstr[i] = toupper ((int) res->guidstr[i]); i++; } return 1; } Commit Message: Fix crash in pe CWE ID: CWE-125
static int get_debug_info(struct PE_(r_bin_pe_obj_t)* bin, PE_(image_debug_directory_entry)* dbg_dir_entry, ut8* dbg_data, int dbg_data_len, SDebugInfo* res) { #define SIZEOF_FILE_NAME 255 int i = 0; const char* basename; if (!dbg_data) { return 0; } switch (dbg_dir_entry->Type) { case IMAGE_DEBUG_TYPE_CODEVIEW: if (!strncmp ((char*) dbg_data, "RSDS", 4)) { SCV_RSDS_HEADER rsds_hdr; init_rsdr_hdr (&rsds_hdr); if (!get_rsds (dbg_data, dbg_data_len, &rsds_hdr)) { bprintf ("Warning: Cannot read PE debug info\n"); return 0; } snprintf (res->guidstr, GUIDSTR_LEN, "%08x%04x%04x%02x%02x%02x%02x%02x%02x%02x%02x%x", rsds_hdr.guid.data1, rsds_hdr.guid.data2, rsds_hdr.guid.data3, rsds_hdr.guid.data4[0], rsds_hdr.guid.data4[1], rsds_hdr.guid.data4[2], rsds_hdr.guid.data4[3], rsds_hdr.guid.data4[4], rsds_hdr.guid.data4[5], rsds_hdr.guid.data4[6], rsds_hdr.guid.data4[7], rsds_hdr.age); basename = r_file_basename ((char*) rsds_hdr.file_name); strncpy (res->file_name, (const char*) basename, sizeof (res->file_name)); res->file_name[sizeof (res->file_name) - 1] = 0; rsds_hdr.free ((struct SCV_RSDS_HEADER*) &rsds_hdr); } else if (strncmp ((const char*) dbg_data, "NB10", 4) == 0) { if (dbg_data_len < 20) { eprintf ("Truncated NB10 entry, not enough data to parse\n"); return 0; } SCV_NB10_HEADER nb10_hdr = {{0}}; init_cv_nb10_header (&nb10_hdr); get_nb10 (dbg_data, &nb10_hdr); snprintf (res->guidstr, sizeof (res->guidstr), "%x%x", nb10_hdr.timestamp, nb10_hdr.age); res->file_name[0] = 0; if (nb10_hdr.file_name) { strncpy (res->file_name, (const char*) nb10_hdr.file_name, sizeof (res->file_name) - 1); } res->file_name[sizeof (res->file_name) - 1] = 0; nb10_hdr.free ((struct SCV_NB10_HEADER*) &nb10_hdr); } else { bprintf ("CodeView section not NB10 or RSDS\n"); return 0; } break; default: return 0; } while (i < 33) { res->guidstr[i] = toupper ((int) res->guidstr[i]); i++; } return 1; }
169,228
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: PHPAPI char *php_lookup_class_name(zval *object, zend_uint *nlen) { zval **val; char *retval = NULL; HashTable *object_properties; TSRMLS_FETCH(); object_properties = Z_OBJPROP_P(object); if (zend_hash_find(object_properties, MAGIC_MEMBER, sizeof(MAGIC_MEMBER), (void **) &val) == SUCCESS) { retval = estrndup(Z_STRVAL_PP(val), Z_STRLEN_PP(val)); if (nlen) { *nlen = Z_STRLEN_PP(val); } } return retval; } Commit Message: CWE ID:
PHPAPI char *php_lookup_class_name(zval *object, zend_uint *nlen) { zval **val; char *retval = NULL; HashTable *object_properties; TSRMLS_FETCH(); object_properties = Z_OBJPROP_P(object); if (zend_hash_find(object_properties, MAGIC_MEMBER, sizeof(MAGIC_MEMBER), (void **) &val) == SUCCESS && Z_TYPE_PP(val) == IS_STRING) { retval = estrndup(Z_STRVAL_PP(val), Z_STRLEN_PP(val)); if (nlen) { *nlen = Z_STRLEN_PP(val); } } return retval; }
165,303
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int get_task_ioprio(struct task_struct *p) { int ret; ret = security_task_getioprio(p); if (ret) goto out; ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); if (p->io_context) ret = p->io_context->ioprio; out: return ret; } Commit Message: block: fix use-after-free in sys_ioprio_get() get_task_ioprio() accesses the task->io_context without holding the task lock and thus can race with exit_io_context(), leading to a use-after-free. The reproducer below hits this within a few seconds on my 4-core QEMU VM: #define _GNU_SOURCE #include <assert.h> #include <unistd.h> #include <sys/syscall.h> #include <sys/wait.h> int main(int argc, char **argv) { pid_t pid, child; long nproc, i; /* ioprio_set(IOPRIO_WHO_PROCESS, 0, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); */ syscall(SYS_ioprio_set, 1, 0, 0x6000); nproc = sysconf(_SC_NPROCESSORS_ONLN); for (i = 0; i < nproc; i++) { pid = fork(); assert(pid != -1); if (pid == 0) { for (;;) { pid = fork(); assert(pid != -1); if (pid == 0) { _exit(0); } else { child = wait(NULL); assert(child == pid); } } } pid = fork(); assert(pid != -1); if (pid == 0) { for (;;) { /* ioprio_get(IOPRIO_WHO_PGRP, 0); */ syscall(SYS_ioprio_get, 2, 0); } } } for (;;) { /* ioprio_get(IOPRIO_WHO_PGRP, 0); */ syscall(SYS_ioprio_get, 2, 0); } return 0; } This gets us KASAN dumps like this: [ 35.526914] ================================================================== [ 35.530009] BUG: KASAN: out-of-bounds in get_task_ioprio+0x7b/0x90 at addr ffff880066f34e6c [ 35.530009] Read of size 2 by task ioprio-gpf/363 [ 35.530009] ============================================================================= [ 35.530009] BUG blkdev_ioc (Not tainted): kasan: bad access detected [ 35.530009] ----------------------------------------------------------------------------- [ 35.530009] Disabling lock debugging due to kernel taint [ 35.530009] INFO: Allocated in create_task_io_context+0x2b/0x370 age=0 cpu=0 pid=360 [ 35.530009] ___slab_alloc+0x55d/0x5a0 [ 35.530009] __slab_alloc.isra.20+0x2b/0x40 [ 35.530009] kmem_cache_alloc_node+0x84/0x200 [ 35.530009] create_task_io_context+0x2b/0x370 [ 35.530009] get_task_io_context+0x92/0xb0 [ 35.530009] copy_process.part.8+0x5029/0x5660 [ 35.530009] _do_fork+0x155/0x7e0 [ 35.530009] SyS_clone+0x19/0x20 [ 35.530009] do_syscall_64+0x195/0x3a0 [ 35.530009] return_from_SYSCALL_64+0x0/0x6a [ 35.530009] INFO: Freed in put_io_context+0xe7/0x120 age=0 cpu=0 pid=1060 [ 35.530009] __slab_free+0x27b/0x3d0 [ 35.530009] kmem_cache_free+0x1fb/0x220 [ 35.530009] put_io_context+0xe7/0x120 [ 35.530009] put_io_context_active+0x238/0x380 [ 35.530009] exit_io_context+0x66/0x80 [ 35.530009] do_exit+0x158e/0x2b90 [ 35.530009] do_group_exit+0xe5/0x2b0 [ 35.530009] SyS_exit_group+0x1d/0x20 [ 35.530009] entry_SYSCALL_64_fastpath+0x1a/0xa4 [ 35.530009] INFO: Slab 0xffffea00019bcd00 objects=20 used=4 fp=0xffff880066f34ff0 flags=0x1fffe0000004080 [ 35.530009] INFO: Object 0xffff880066f34e58 @offset=3672 fp=0x0000000000000001 [ 35.530009] ================================================================== Fix it by grabbing the task lock while we poke at the io_context. Cc: [email protected] Reported-by: Dmitry Vyukov <[email protected]> Signed-off-by: Omar Sandoval <[email protected]> Signed-off-by: Jens Axboe <[email protected]> CWE ID: CWE-416
static int get_task_ioprio(struct task_struct *p) { int ret; ret = security_task_getioprio(p); if (ret) goto out; ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); task_lock(p); if (p->io_context) ret = p->io_context->ioprio; task_unlock(p); out: return ret; }
166,925
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool GLES2DecoderImpl::SimulateAttrib0( GLuint max_vertex_accessed, bool* simulated) { DCHECK(simulated); *simulated = false; if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) return true; const VertexAttribManager::VertexAttribInfo* info = vertex_attrib_manager_->GetVertexAttribInfo(0); bool attrib_0_used = current_program_->GetAttribInfoByLocation(0) != NULL; if (info->enabled() && attrib_0_used) { return true; } typedef VertexAttribManager::VertexAttribInfo::Vec4 Vec4; GLuint num_vertices = max_vertex_accessed + 1; GLuint size_needed = 0; if (num_vertices == 0 || !SafeMultiply(num_vertices, static_cast<GLuint>(sizeof(Vec4)), &size_needed) || size_needed > 0x7FFFFFFFU) { SetGLError(GL_OUT_OF_MEMORY, "glDrawXXX: Simulating attrib 0"); return false; } CopyRealGLErrorsToWrapper(); glBindBuffer(GL_ARRAY_BUFFER, attrib_0_buffer_id_); if (static_cast<GLsizei>(size_needed) > attrib_0_size_) { glBufferData(GL_ARRAY_BUFFER, size_needed, NULL, GL_DYNAMIC_DRAW); GLenum error = glGetError(); if (error != GL_NO_ERROR) { SetGLError(GL_OUT_OF_MEMORY, "glDrawXXX: Simulating attrib 0"); return false; } attrib_0_buffer_matches_value_ = false; } if (attrib_0_used && (!attrib_0_buffer_matches_value_ || (info->value().v[0] != attrib_0_value_.v[0] || info->value().v[1] != attrib_0_value_.v[1] || info->value().v[2] != attrib_0_value_.v[2] || info->value().v[3] != attrib_0_value_.v[3]))) { std::vector<Vec4> temp(num_vertices, info->value()); glBufferSubData(GL_ARRAY_BUFFER, 0, size_needed, &temp[0].v[0]); attrib_0_buffer_matches_value_ = true; attrib_0_value_ = info->value(); attrib_0_size_ = size_needed; } glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, NULL); if (info->divisor()) glVertexAttribDivisorANGLE(0, 0); *simulated = true; return true; } Commit Message: Always write data to new buffer in SimulateAttrib0 This is to work around linux nvidia driver bug. TEST=asan BUG=118970 Review URL: http://codereview.chromium.org/10019003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@131538 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
bool GLES2DecoderImpl::SimulateAttrib0( GLuint max_vertex_accessed, bool* simulated) { DCHECK(simulated); *simulated = false; if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) return true; const VertexAttribManager::VertexAttribInfo* info = vertex_attrib_manager_->GetVertexAttribInfo(0); bool attrib_0_used = current_program_->GetAttribInfoByLocation(0) != NULL; if (info->enabled() && attrib_0_used) { return true; } typedef VertexAttribManager::VertexAttribInfo::Vec4 Vec4; GLuint num_vertices = max_vertex_accessed + 1; GLuint size_needed = 0; if (num_vertices == 0 || !SafeMultiply(num_vertices, static_cast<GLuint>(sizeof(Vec4)), &size_needed) || size_needed > 0x7FFFFFFFU) { SetGLError(GL_OUT_OF_MEMORY, "glDrawXXX: Simulating attrib 0"); return false; } CopyRealGLErrorsToWrapper(); glBindBuffer(GL_ARRAY_BUFFER, attrib_0_buffer_id_); bool new_buffer = static_cast<GLsizei>(size_needed) > attrib_0_size_; if (new_buffer) { glBufferData(GL_ARRAY_BUFFER, size_needed, NULL, GL_DYNAMIC_DRAW); GLenum error = glGetError(); if (error != GL_NO_ERROR) { SetGLError(GL_OUT_OF_MEMORY, "glDrawXXX: Simulating attrib 0"); return false; } } if (new_buffer || (attrib_0_used && (!attrib_0_buffer_matches_value_ || (info->value().v[0] != attrib_0_value_.v[0] || info->value().v[1] != attrib_0_value_.v[1] || info->value().v[2] != attrib_0_value_.v[2] || info->value().v[3] != attrib_0_value_.v[3])))) { std::vector<Vec4> temp(num_vertices, info->value()); glBufferSubData(GL_ARRAY_BUFFER, 0, size_needed, &temp[0].v[0]); attrib_0_buffer_matches_value_ = true; attrib_0_value_ = info->value(); attrib_0_size_ = size_needed; } glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, NULL); if (info->divisor()) glVertexAttribDivisorANGLE(0, 0); *simulated = true; return true; }
171,058
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Image *ReadTGAImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; MagickBooleanType status; PixelInfo pixel; Quantum index; register Quantum *q; register ssize_t i, x; size_t base, flag, offset, real, skip; ssize_t count, y; TGAInfo tga_info; unsigned char j, k, pixels[4], runlength; unsigned int alpha_bits; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read TGA header information. */ count=ReadBlob(image,1,&tga_info.id_length); tga_info.colormap_type=(unsigned char) ReadBlobByte(image); tga_info.image_type=(TGAImageType) ReadBlobByte(image); if ((count != 1) || ((tga_info.image_type != TGAColormap) && (tga_info.image_type != TGARGB) && (tga_info.image_type != TGAMonochrome) && (tga_info.image_type != TGARLEColormap) && (tga_info.image_type != TGARLERGB) && (tga_info.image_type != TGARLEMonochrome)) || (((tga_info.image_type == TGAColormap) || (tga_info.image_type == TGARLEColormap)) && (tga_info.colormap_type == 0))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); tga_info.colormap_index=ReadBlobLSBShort(image); tga_info.colormap_length=ReadBlobLSBShort(image); tga_info.colormap_size=(unsigned char) ReadBlobByte(image); tga_info.x_origin=ReadBlobLSBShort(image); tga_info.y_origin=ReadBlobLSBShort(image); tga_info.width=(unsigned short) ReadBlobLSBShort(image); tga_info.height=(unsigned short) ReadBlobLSBShort(image); tga_info.bits_per_pixel=(unsigned char) ReadBlobByte(image); tga_info.attributes=(unsigned char) ReadBlobByte(image); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); if ((((tga_info.bits_per_pixel <= 1) || (tga_info.bits_per_pixel >= 17)) && (tga_info.bits_per_pixel != 24) && (tga_info.bits_per_pixel != 32))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image structure. */ image->columns=tga_info.width; image->rows=tga_info.height; alpha_bits=(tga_info.attributes & 0x0FU); image->alpha_trait=(alpha_bits > 0) || (tga_info.bits_per_pixel == 32) || (tga_info.colormap_size == 32) ? BlendPixelTrait : UndefinedPixelTrait; if ((tga_info.image_type != TGAColormap) && (tga_info.image_type != TGARLEColormap)) image->depth=(size_t) ((tga_info.bits_per_pixel <= 8) ? 8 : (tga_info.bits_per_pixel <= 16) ? 5 : (tga_info.bits_per_pixel == 24) ? 8 : (tga_info.bits_per_pixel == 32) ? 8 : 8); else image->depth=(size_t) ((tga_info.colormap_size <= 8) ? 8 : (tga_info.colormap_size <= 16) ? 5 : (tga_info.colormap_size == 24) ? 8 : (tga_info.colormap_size == 32) ? 
8 : 8); if ((tga_info.image_type == TGAColormap) || (tga_info.image_type == TGAMonochrome) || (tga_info.image_type == TGARLEColormap) || (tga_info.image_type == TGARLEMonochrome)) image->storage_class=PseudoClass; image->compression=NoCompression; if ((tga_info.image_type == TGARLEColormap) || (tga_info.image_type == TGARLEMonochrome) || (tga_info.image_type == TGARLERGB)) image->compression=RLECompression; if (image->storage_class == PseudoClass) { if (tga_info.colormap_type != 0) image->colors=tga_info.colormap_index+tga_info.colormap_length; else { size_t one; one=1; image->colors=one << tga_info.bits_per_pixel; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } } if (tga_info.id_length != 0) { char *comment; size_t length; /* TGA image comment. */ length=(size_t) tga_info.id_length; comment=(char *) NULL; if (~length >= (MagickPathExtent-1)) comment=(char *) AcquireQuantumMemory(length+MagickPathExtent, sizeof(*comment)); if (comment == (char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,tga_info.id_length,(unsigned char *) comment); comment[tga_info.id_length]='\0'; (void) SetImageProperty(image,"comment",comment,exception); comment=DestroyString(comment); } if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(image); } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); (void) ResetMagickMemory(&pixel,0,sizeof(pixel)); pixel.alpha=(MagickRealType) OpaqueAlpha; if (tga_info.colormap_type != 0) { /* Read TGA raster colormap. */ if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) tga_info.colormap_index; i++) image->colormap[i]=pixel; for ( ; i < (ssize_t) image->colors; i++) { switch (tga_info.colormap_size) { case 8: default: { /* Gray scale. */ pixel.red=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.green=pixel.red; pixel.blue=pixel.red; break; } case 15: case 16: { QuantumAny range; /* 5 bits each of red green and blue. */ j=(unsigned char) ReadBlobByte(image); k=(unsigned char) ReadBlobByte(image); range=GetQuantumRange(5UL); pixel.red=(MagickRealType) ScaleAnyToQuantum(1UL*(k & 0x7c) >> 2, range); pixel.green=(MagickRealType) ScaleAnyToQuantum((1UL*(k & 0x03) << 3)+(1UL*(j & 0xe0) >> 5),range); pixel.blue=(MagickRealType) ScaleAnyToQuantum(1UL*(j & 0x1f),range); break; } case 24: { /* 8 bits each of blue, green and red. */ pixel.blue=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.green=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.red=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); break; } case 32: { /* 8 bits each of blue, green, red, and alpha. */ pixel.blue=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.green=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.red=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.alpha=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); break; } } image->colormap[i]=pixel; } } /* Convert TGA pixels to pixel packets. 
*/ base=0; flag=0; skip=MagickFalse; real=0; index=0; runlength=0; offset=0; for (y=0; y < (ssize_t) image->rows; y++) { real=offset; if (((unsigned char) (tga_info.attributes & 0x20) >> 5) == 0) real=image->rows-real-1; q=QueueAuthenticPixels(image,0,(ssize_t) real,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if ((tga_info.image_type == TGARLEColormap) || (tga_info.image_type == TGARLERGB) || (tga_info.image_type == TGARLEMonochrome)) { if (runlength != 0) { runlength--; skip=flag != 0; } else { count=ReadBlob(image,1,&runlength); if (count != 1) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); flag=runlength & 0x80; if (flag != 0) runlength-=128; skip=MagickFalse; } } if (skip == MagickFalse) switch (tga_info.bits_per_pixel) { case 8: default: { /* Gray scale. */ index=(Quantum) ReadBlobByte(image); if (tga_info.colormap_type != 0) pixel=image->colormap[(ssize_t) ConstrainColormapIndex(image, (ssize_t) index,exception)]; else { pixel.red=(MagickRealType) ScaleCharToQuantum((unsigned char) index); pixel.green=(MagickRealType) ScaleCharToQuantum((unsigned char) index); pixel.blue=(MagickRealType) ScaleCharToQuantum((unsigned char) index); } break; } case 15: case 16: { QuantumAny range; /* 5 bits each of RGB. */ if (ReadBlob(image,2,pixels) != 2) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); j=pixels[0]; k=pixels[1]; range=GetQuantumRange(5UL); pixel.red=(MagickRealType) ScaleAnyToQuantum(1UL*(k & 0x7c) >> 2, range); pixel.green=(MagickRealType) ScaleAnyToQuantum((1UL* (k & 0x03) << 3)+(1UL*(j & 0xe0) >> 5),range); pixel.blue=(MagickRealType) ScaleAnyToQuantum(1UL*(j & 0x1f),range); if (image->alpha_trait != UndefinedPixelTrait) pixel.alpha=(MagickRealType) ((k & 0x80) == 0 ? (Quantum) TransparentAlpha : (Quantum) OpaqueAlpha); if (image->storage_class == PseudoClass) index=(Quantum) ConstrainColormapIndex(image,((ssize_t) (k << 8))+ j,exception); break; } case 24: { /* BGR pixels. */ if (ReadBlob(image,3,pixels) != 3) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); pixel.blue=(MagickRealType) ScaleCharToQuantum(pixels[0]); pixel.green=(MagickRealType) ScaleCharToQuantum(pixels[1]); pixel.red=(MagickRealType) ScaleCharToQuantum(pixels[2]); break; } case 32: { /* BGRA pixels. 
*/ if (ReadBlob(image,4,pixels) != 4) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); pixel.blue=(MagickRealType) ScaleCharToQuantum(pixels[0]); pixel.green=(MagickRealType) ScaleCharToQuantum(pixels[1]); pixel.red=(MagickRealType) ScaleCharToQuantum(pixels[2]); pixel.alpha=(MagickRealType) ScaleCharToQuantum(pixels[3]); break; } } if (status == MagickFalse) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); if (image->storage_class == PseudoClass) SetPixelIndex(image,index,q); SetPixelRed(image,ClampToQuantum(pixel.red),q); SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } /* if (((unsigned char) (tga_info.attributes & 0xc0) >> 6) == 4) offset+=4; else */ if (((unsigned char) (tga_info.attributes & 0xc0) >> 6) == 2) offset+=2; else offset++; if (offset >= image->rows) { base++; offset=base; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if (EOFBlob(image) != MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); return(GetFirstImageInList(image)); } Commit Message: https://bugs.launchpad.net/ubuntu/+source/imagemagick/+bug/1490362 CWE ID: CWE-415
static Image *ReadTGAImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; MagickBooleanType status; PixelInfo pixel; Quantum index; register Quantum *q; register ssize_t i, x; size_t base, flag, offset, real, skip; ssize_t count, y; TGAInfo tga_info; unsigned char j, k, pixels[4], runlength; unsigned int alpha_bits; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read TGA header information. */ count=ReadBlob(image,1,&tga_info.id_length); tga_info.colormap_type=(unsigned char) ReadBlobByte(image); tga_info.image_type=(TGAImageType) ReadBlobByte(image); if ((count != 1) || ((tga_info.image_type != TGAColormap) && (tga_info.image_type != TGARGB) && (tga_info.image_type != TGAMonochrome) && (tga_info.image_type != TGARLEColormap) && (tga_info.image_type != TGARLERGB) && (tga_info.image_type != TGARLEMonochrome)) || (((tga_info.image_type == TGAColormap) || (tga_info.image_type == TGARLEColormap)) && (tga_info.colormap_type == 0))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); tga_info.colormap_index=ReadBlobLSBShort(image); tga_info.colormap_length=ReadBlobLSBShort(image); tga_info.colormap_size=(unsigned char) ReadBlobByte(image); tga_info.x_origin=ReadBlobLSBShort(image); tga_info.y_origin=ReadBlobLSBShort(image); tga_info.width=(unsigned short) ReadBlobLSBShort(image); tga_info.height=(unsigned short) ReadBlobLSBShort(image); tga_info.bits_per_pixel=(unsigned char) ReadBlobByte(image); tga_info.attributes=(unsigned char) ReadBlobByte(image); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); if ((((tga_info.bits_per_pixel <= 1) || (tga_info.bits_per_pixel >= 17)) && (tga_info.bits_per_pixel != 24) && (tga_info.bits_per_pixel != 32))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image structure. */ image->columns=tga_info.width; image->rows=tga_info.height; alpha_bits=(tga_info.attributes & 0x0FU); image->alpha_trait=(alpha_bits > 0) || (tga_info.bits_per_pixel == 32) || (tga_info.colormap_size == 32) ? BlendPixelTrait : UndefinedPixelTrait; if ((tga_info.image_type != TGAColormap) && (tga_info.image_type != TGARLEColormap)) image->depth=(size_t) ((tga_info.bits_per_pixel <= 8) ? 8 : (tga_info.bits_per_pixel <= 16) ? 5 : (tga_info.bits_per_pixel == 24) ? 8 : (tga_info.bits_per_pixel == 32) ? 8 : 8); else image->depth=(size_t) ((tga_info.colormap_size <= 8) ? 8 : (tga_info.colormap_size <= 16) ? 5 : (tga_info.colormap_size == 24) ? 8 : (tga_info.colormap_size == 32) ? 
8 : 8); if ((tga_info.image_type == TGAColormap) || (tga_info.image_type == TGAMonochrome) || (tga_info.image_type == TGARLEColormap) || (tga_info.image_type == TGARLEMonochrome)) image->storage_class=PseudoClass; image->compression=NoCompression; if ((tga_info.image_type == TGARLEColormap) || (tga_info.image_type == TGARLEMonochrome) || (tga_info.image_type == TGARLERGB)) image->compression=RLECompression; if (image->storage_class == PseudoClass) { if (tga_info.colormap_type != 0) image->colors=tga_info.colormap_index+tga_info.colormap_length; else { size_t one; one=1; image->colors=one << tga_info.bits_per_pixel; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } } if (tga_info.id_length != 0) { char *comment; size_t length; /* TGA image comment. */ length=(size_t) tga_info.id_length; comment=(char *) NULL; if (~length >= (MagickPathExtent-1)) comment=(char *) AcquireQuantumMemory(length+MagickPathExtent, sizeof(*comment)); if (comment == (char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,tga_info.id_length,(unsigned char *) comment); comment[tga_info.id_length]='\0'; (void) SetImageProperty(image,"comment",comment,exception); comment=DestroyString(comment); } if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(image); } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); (void) ResetMagickMemory(&pixel,0,sizeof(pixel)); pixel.alpha=(MagickRealType) OpaqueAlpha; if (tga_info.colormap_type != 0) { /* Read TGA raster colormap. */ if (image->colors < tga_info.colormap_index) image->colors=tga_info.colormap_index; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) tga_info.colormap_index; i++) image->colormap[i]=pixel; for ( ; i < (ssize_t) image->colors; i++) { switch (tga_info.colormap_size) { case 8: default: { /* Gray scale. */ pixel.red=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.green=pixel.red; pixel.blue=pixel.red; break; } case 15: case 16: { QuantumAny range; /* 5 bits each of red green and blue. */ j=(unsigned char) ReadBlobByte(image); k=(unsigned char) ReadBlobByte(image); range=GetQuantumRange(5UL); pixel.red=(MagickRealType) ScaleAnyToQuantum(1UL*(k & 0x7c) >> 2, range); pixel.green=(MagickRealType) ScaleAnyToQuantum((1UL*(k & 0x03) << 3)+(1UL*(j & 0xe0) >> 5),range); pixel.blue=(MagickRealType) ScaleAnyToQuantum(1UL*(j & 0x1f),range); break; } case 24: { /* 8 bits each of blue, green and red. */ pixel.blue=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.green=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.red=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); break; } case 32: { /* 8 bits each of blue, green, red, and alpha. */ pixel.blue=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.green=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.red=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); pixel.alpha=(MagickRealType) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); break; } } image->colormap[i]=pixel; } } /* Convert TGA pixels to pixel packets. 
*/ base=0; flag=0; skip=MagickFalse; real=0; index=0; runlength=0; offset=0; for (y=0; y < (ssize_t) image->rows; y++) { real=offset; if (((unsigned char) (tga_info.attributes & 0x20) >> 5) == 0) real=image->rows-real-1; q=QueueAuthenticPixels(image,0,(ssize_t) real,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if ((tga_info.image_type == TGARLEColormap) || (tga_info.image_type == TGARLERGB) || (tga_info.image_type == TGARLEMonochrome)) { if (runlength != 0) { runlength--; skip=flag != 0; } else { count=ReadBlob(image,1,&runlength); if (count != 1) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); flag=runlength & 0x80; if (flag != 0) runlength-=128; skip=MagickFalse; } } if (skip == MagickFalse) switch (tga_info.bits_per_pixel) { case 8: default: { /* Gray scale. */ index=(Quantum) ReadBlobByte(image); if (tga_info.colormap_type != 0) pixel=image->colormap[(ssize_t) ConstrainColormapIndex(image, (ssize_t) index,exception)]; else { pixel.red=(MagickRealType) ScaleCharToQuantum((unsigned char) index); pixel.green=(MagickRealType) ScaleCharToQuantum((unsigned char) index); pixel.blue=(MagickRealType) ScaleCharToQuantum((unsigned char) index); } break; } case 15: case 16: { QuantumAny range; /* 5 bits each of RGB. */ if (ReadBlob(image,2,pixels) != 2) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); j=pixels[0]; k=pixels[1]; range=GetQuantumRange(5UL); pixel.red=(MagickRealType) ScaleAnyToQuantum(1UL*(k & 0x7c) >> 2, range); pixel.green=(MagickRealType) ScaleAnyToQuantum((1UL* (k & 0x03) << 3)+(1UL*(j & 0xe0) >> 5),range); pixel.blue=(MagickRealType) ScaleAnyToQuantum(1UL*(j & 0x1f),range); if (image->alpha_trait != UndefinedPixelTrait) pixel.alpha=(MagickRealType) ((k & 0x80) == 0 ? (Quantum) TransparentAlpha : (Quantum) OpaqueAlpha); if (image->storage_class == PseudoClass) index=(Quantum) ConstrainColormapIndex(image,((ssize_t) (k << 8))+ j,exception); break; } case 24: { /* BGR pixels. */ if (ReadBlob(image,3,pixels) != 3) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); pixel.blue=(MagickRealType) ScaleCharToQuantum(pixels[0]); pixel.green=(MagickRealType) ScaleCharToQuantum(pixels[1]); pixel.red=(MagickRealType) ScaleCharToQuantum(pixels[2]); break; } case 32: { /* BGRA pixels. 
*/ if (ReadBlob(image,4,pixels) != 4) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); pixel.blue=(MagickRealType) ScaleCharToQuantum(pixels[0]); pixel.green=(MagickRealType) ScaleCharToQuantum(pixels[1]); pixel.red=(MagickRealType) ScaleCharToQuantum(pixels[2]); pixel.alpha=(MagickRealType) ScaleCharToQuantum(pixels[3]); break; } } if (status == MagickFalse) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); if (image->storage_class == PseudoClass) SetPixelIndex(image,index,q); SetPixelRed(image,ClampToQuantum(pixel.red),q); SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } /* if (((unsigned char) (tga_info.attributes & 0xc0) >> 6) == 4) offset+=4; else */ if (((unsigned char) (tga_info.attributes & 0xc0) >> 6) == 2) offset+=2; else offset++; if (offset >= image->rows) { base++; offset=base; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if (EOFBlob(image) != MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
168,865
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static sk_sp<SkImage> scaleSkImage(sk_sp<SkImage> skImage, unsigned resizeWidth, unsigned resizeHeight, SkFilterQuality resizeQuality) { SkImageInfo resizedInfo = SkImageInfo::Make( resizeWidth, resizeHeight, kN32_SkColorType, kUnpremul_SkAlphaType); RefPtr<ArrayBuffer> dstBuffer = ArrayBuffer::createOrNull( resizeWidth * resizeHeight, resizedInfo.bytesPerPixel()); if (!dstBuffer) return nullptr; RefPtr<Uint8Array> resizedPixels = Uint8Array::create(dstBuffer, 0, dstBuffer->byteLength()); SkPixmap pixmap( resizedInfo, resizedPixels->data(), static_cast<size_t>(resizeWidth) * resizedInfo.bytesPerPixel()); skImage->scalePixels(pixmap, resizeQuality); return SkImage::MakeFromRaster(pixmap, [](const void*, void* pixels) { static_cast<Uint8Array*>(pixels)->deref(); }, resizedPixels.release().leakRef()); } Commit Message: Prevent bad casting in ImageBitmap when calling ArrayBuffer::createOrNull Currently when ImageBitmap's constructor is invoked, we check whether dstSize will overflow size_t or not. The problem comes when we call ArrayBuffer::createOrNull some times in the code. Both parameters of ArrayBuffer::createOrNull are unsigned. In ImageBitmap when we call this method, the first parameter is usually width * height. This could overflow unsigned even if it has been checked safe with size_t, the reason is that unsigned is a 32-bit value on 64-bit systems, while size_t is a 64-bit value. This CL makes a change such that we check whether the dstSize will overflow unsigned or not. In this case, we can guarantee that createOrNull will not have any crash. BUG=664139 Review-Url: https://codereview.chromium.org/2500493002 Cr-Commit-Position: refs/heads/master@{#431936} CWE ID: CWE-787
static sk_sp<SkImage> scaleSkImage(sk_sp<SkImage> skImage, unsigned resizeWidth, unsigned resizeHeight, SkFilterQuality resizeQuality) { SkImageInfo resizedInfo = SkImageInfo::Make( resizeWidth, resizeHeight, kN32_SkColorType, kUnpremul_SkAlphaType); RefPtr<ArrayBuffer> dstBuffer = ArrayBuffer::createOrNull( resizeWidth * resizeHeight, resizedInfo.bytesPerPixel()); if (!dstBuffer) return nullptr; RefPtr<Uint8Array> resizedPixels = Uint8Array::create(dstBuffer, 0, dstBuffer->byteLength()); SkPixmap pixmap( resizedInfo, resizedPixels->data(), static_cast<unsigned>(resizeWidth) * resizedInfo.bytesPerPixel()); skImage->scalePixels(pixmap, resizeQuality); return SkImage::MakeFromRaster(pixmap, [](const void*, void* pixels) { static_cast<Uint8Array*>(pixels)->deref(); }, resizedPixels.release().leakRef()); }
172,505
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ConfirmInfoBar::ConfirmInfoBar(std::unique_ptr<ConfirmInfoBarDelegate> delegate) : InfoBarView(std::move(delegate)) { auto* delegate_ptr = GetDelegate(); label_ = CreateLabel(delegate_ptr->GetMessageText()); AddChildView(label_); const auto buttons = delegate_ptr->GetButtons(); if (buttons & ConfirmInfoBarDelegate::BUTTON_OK) { ok_button_ = CreateButton(ConfirmInfoBarDelegate::BUTTON_OK); ok_button_->SetProminent(true); if (delegate_ptr->OKButtonTriggersUACPrompt()) { elevation_icon_setter_.reset(new ElevationIconSetter( ok_button_, base::BindOnce(&ConfirmInfoBar::Layout, base::Unretained(this)))); } } if (buttons & ConfirmInfoBarDelegate::BUTTON_CANCEL) { cancel_button_ = CreateButton(ConfirmInfoBarDelegate::BUTTON_CANCEL); if (buttons == ConfirmInfoBarDelegate::BUTTON_CANCEL) cancel_button_->SetProminent(true); } link_ = CreateLink(delegate_ptr->GetLinkText(), this); AddChildView(link_); } Commit Message: Allow to specify elide behavior for confrim infobar message Used in "<extension name> is debugging this browser" infobar. Bug: 823194 Change-Id: Iff6627097c020cccca8f7cc3e21a803a41fd8f2c Reviewed-on: https://chromium-review.googlesource.com/1048064 Commit-Queue: Dmitry Gozman <[email protected]> Reviewed-by: Devlin <[email protected]> Reviewed-by: Peter Kasting <[email protected]> Cr-Commit-Position: refs/heads/master@{#557245} CWE ID: CWE-254
ConfirmInfoBar::ConfirmInfoBar(std::unique_ptr<ConfirmInfoBarDelegate> delegate) : InfoBarView(std::move(delegate)) { auto* delegate_ptr = GetDelegate(); label_ = CreateLabel(delegate_ptr->GetMessageText()); label_->SetElideBehavior(delegate_ptr->GetMessageElideBehavior()); AddChildView(label_); const auto buttons = delegate_ptr->GetButtons(); if (buttons & ConfirmInfoBarDelegate::BUTTON_OK) { ok_button_ = CreateButton(ConfirmInfoBarDelegate::BUTTON_OK); ok_button_->SetProminent(true); if (delegate_ptr->OKButtonTriggersUACPrompt()) { elevation_icon_setter_.reset(new ElevationIconSetter( ok_button_, base::BindOnce(&ConfirmInfoBar::Layout, base::Unretained(this)))); } } if (buttons & ConfirmInfoBarDelegate::BUTTON_CANCEL) { cancel_button_ = CreateButton(ConfirmInfoBarDelegate::BUTTON_CANCEL); if (buttons == ConfirmInfoBarDelegate::BUTTON_CANCEL) cancel_button_->SetProminent(true); } link_ = CreateLink(delegate_ptr->GetLinkText(), this); AddChildView(link_); }
173,165
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: EffectPaintPropertyNode* EffectPaintPropertyNode::Root() { DEFINE_STATIC_REF(EffectPaintPropertyNode, root, (EffectPaintPropertyNode::Create( nullptr, State{TransformPaintPropertyNode::Root(), ClipPaintPropertyNode::Root()}))); return root; } Commit Message: Reland "[CI] Make paint property nodes non-ref-counted" This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7. Reason for revert: Retry in M69. Original change's description: > Revert "[CI] Make paint property nodes non-ref-counted" > > This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123. > > Reason for revert: Caused bugs found by clusterfuzz > > Original change's description: > > [CI] Make paint property nodes non-ref-counted > > > > Now all paint property nodes are owned by ObjectPaintProperties > > (and LocalFrameView temporarily before removing non-RLS mode). > > Others just use raw pointers or references. > > > > Bug: 833496 > > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2 > > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae > > Reviewed-on: https://chromium-review.googlesource.com/1031101 > > Reviewed-by: Tien-Ren Chen <[email protected]> > > Commit-Queue: Xianzhu Wang <[email protected]> > > Cr-Commit-Position: refs/heads/master@{#554626} > > [email protected],[email protected],[email protected] > > Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f > No-Presubmit: true > No-Tree-Checks: true > No-Try: true > Bug: 833496,837932,837943 > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2 > Reviewed-on: https://chromium-review.googlesource.com/1034292 > Reviewed-by: Xianzhu Wang <[email protected]> > Commit-Queue: Xianzhu Wang <[email protected]> > Cr-Commit-Position: refs/heads/master@{#554653} [email protected],[email protected],[email protected] # Not skipping CQ checks because original CL landed > 1 day ago. Bug: 833496, 837932, 837943 Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992 Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2 Reviewed-on: https://chromium-review.googlesource.com/1083491 Commit-Queue: Xianzhu Wang <[email protected]> Reviewed-by: Xianzhu Wang <[email protected]> Cr-Commit-Position: refs/heads/master@{#563930} CWE ID:
const EffectPaintPropertyNode& EffectPaintPropertyNode::Root() { DEFINE_STATIC_LOCAL(EffectPaintPropertyNode, root, (nullptr, State{&TransformPaintPropertyNode::Root(), &ClipPaintPropertyNode::Root()})); return root; }
171,834
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: std::string SanitizeEndpoint(const std::string& value) { if (value.find('&') != std::string::npos || value.find('?') != std::string::npos) return std::string(); return value; } Commit Message: DevTools: move front-end URL handling to DevToolsUIBindingds BUG=662859 Review-Url: https://codereview.chromium.org/2607833002 Cr-Commit-Position: refs/heads/master@{#440926} CWE ID: CWE-200
std::string SanitizeEndpoint(const std::string& value) {
172,457
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int config__parse_args(struct mosquitto_db *db, struct mosquitto__config *config, int argc, char *argv[]) { int i; int port_tmp; for(i=1; i<argc; i++){ if(!strcmp(argv[i], "-c") || !strcmp(argv[i], "--config-file")){ if(i<argc-1){ db->config_file = argv[i+1]; if(config__read(db, config, false)){ log__printf(NULL, MOSQ_LOG_ERR, "Error: Unable to open configuration file."); return MOSQ_ERR_INVAL; } }else{ log__printf(NULL, MOSQ_LOG_ERR, "Error: -c argument given, but no config file specified."); return MOSQ_ERR_INVAL; } i++; }else if(!strcmp(argv[i], "-d") || !strcmp(argv[i], "--daemon")){ config->daemon = true; }else if(!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help")){ print_usage(); return MOSQ_ERR_INVAL; }else if(!strcmp(argv[i], "-p") || !strcmp(argv[i], "--port")){ if(i<argc-1){ port_tmp = atoi(argv[i+1]); if(port_tmp<1 || port_tmp>65535){ log__printf(NULL, MOSQ_LOG_ERR, "Error: Invalid port specified (%d).", port_tmp); return MOSQ_ERR_INVAL; }else{ if(config->default_listener.port){ log__printf(NULL, MOSQ_LOG_WARNING, "Warning: Default listener port specified multiple times. Only the latest will be used."); } config->default_listener.port = port_tmp; } }else{ log__printf(NULL, MOSQ_LOG_ERR, "Error: -p argument given, but no port specified."); return MOSQ_ERR_INVAL; } i++; }else if(!strcmp(argv[i], "-v") || !strcmp(argv[i], "--verbose")){ db->verbose = true; }else{ fprintf(stderr, "Error: Unknown option '%s'.\n",argv[i]); print_usage(); return MOSQ_ERR_INVAL; } } if(config->listener_count == 0 #ifdef WITH_TLS || config->default_listener.cafile || config->default_listener.capath || config->default_listener.certfile || config->default_listener.keyfile || config->default_listener.ciphers || config->default_listener.psk_hint || config->default_listener.require_certificate || config->default_listener.crlfile || config->default_listener.use_identity_as_username || config->default_listener.use_subject_as_username #endif || config->default_listener.use_username_as_clientid || config->default_listener.host || config->default_listener.port || config->default_listener.max_connections != -1 || config->default_listener.mount_point || config->default_listener.protocol != mp_mqtt || config->default_listener.socket_domain || config->default_listener.security_options.password_file || config->default_listener.security_options.psk_file || config->default_listener.security_options.auth_plugin_config_count || config->default_listener.security_options.allow_anonymous != -1 ){ config->listener_count++; config->listeners = mosquitto__realloc(config->listeners, sizeof(struct mosquitto__listener)*config->listener_count); if(!config->listeners){ log__printf(NULL, MOSQ_LOG_ERR, "Error: Out of memory."); return MOSQ_ERR_NOMEM; } memset(&config->listeners[config->listener_count-1], 0, sizeof(struct mosquitto__listener)); if(config->default_listener.port){ config->listeners[config->listener_count-1].port = config->default_listener.port; }else{ config->listeners[config->listener_count-1].port = 1883; } if(config->default_listener.host){ config->listeners[config->listener_count-1].host = config->default_listener.host; }else{ config->listeners[config->listener_count-1].host = NULL; } if(config->default_listener.mount_point){ config->listeners[config->listener_count-1].mount_point = config->default_listener.mount_point; }else{ config->listeners[config->listener_count-1].mount_point = NULL; } config->listeners[config->listener_count-1].max_connections = config->default_listener.max_connections; 
config->listeners[config->listener_count-1].protocol = config->default_listener.protocol; config->listeners[config->listener_count-1].socket_domain = config->default_listener.socket_domain; config->listeners[config->listener_count-1].client_count = 0; config->listeners[config->listener_count-1].socks = NULL; config->listeners[config->listener_count-1].sock_count = 0; config->listeners[config->listener_count-1].client_count = 0; config->listeners[config->listener_count-1].use_username_as_clientid = config->default_listener.use_username_as_clientid; #ifdef WITH_TLS config->listeners[config->listener_count-1].tls_version = config->default_listener.tls_version; config->listeners[config->listener_count-1].cafile = config->default_listener.cafile; config->listeners[config->listener_count-1].capath = config->default_listener.capath; config->listeners[config->listener_count-1].certfile = config->default_listener.certfile; config->listeners[config->listener_count-1].keyfile = config->default_listener.keyfile; config->listeners[config->listener_count-1].ciphers = config->default_listener.ciphers; config->listeners[config->listener_count-1].psk_hint = config->default_listener.psk_hint; config->listeners[config->listener_count-1].require_certificate = config->default_listener.require_certificate; config->listeners[config->listener_count-1].ssl_ctx = NULL; config->listeners[config->listener_count-1].crlfile = config->default_listener.crlfile; config->listeners[config->listener_count-1].use_identity_as_username = config->default_listener.use_identity_as_username; config->listeners[config->listener_count-1].use_subject_as_username = config->default_listener.use_subject_as_username; #endif config->listeners[config->listener_count-1].security_options.password_file = config->default_listener.security_options.password_file; config->listeners[config->listener_count-1].security_options.psk_file = config->default_listener.security_options.psk_file; config->listeners[config->listener_count-1].security_options.auth_plugin_configs = config->default_listener.security_options.auth_plugin_configs; config->listeners[config->listener_count-1].security_options.auth_plugin_config_count = config->default_listener.security_options.auth_plugin_config_count; config->listeners[config->listener_count-1].security_options.allow_anonymous = config->default_listener.security_options.allow_anonymous; } /* Default to drop to mosquitto user if we are privileged and no user specified. */ if(!config->user){ config->user = "mosquitto"; } if(db->verbose){ config->log_type = INT_MAX; } return config__check(config); } Commit Message: Fix acl_file being ignore for default listener if with per_listener_settings Close #1073. Thanks to Jef Driesen. Bug: https://github.com/eclipse/mosquitto/issues/1073 CWE ID:
int config__parse_args(struct mosquitto_db *db, struct mosquitto__config *config, int argc, char *argv[]) { int i; int port_tmp; for(i=1; i<argc; i++){ if(!strcmp(argv[i], "-c") || !strcmp(argv[i], "--config-file")){ if(i<argc-1){ db->config_file = argv[i+1]; if(config__read(db, config, false)){ log__printf(NULL, MOSQ_LOG_ERR, "Error: Unable to open configuration file."); return MOSQ_ERR_INVAL; } }else{ log__printf(NULL, MOSQ_LOG_ERR, "Error: -c argument given, but no config file specified."); return MOSQ_ERR_INVAL; } i++; }else if(!strcmp(argv[i], "-d") || !strcmp(argv[i], "--daemon")){ config->daemon = true; }else if(!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help")){ print_usage(); return MOSQ_ERR_INVAL; }else if(!strcmp(argv[i], "-p") || !strcmp(argv[i], "--port")){ if(i<argc-1){ port_tmp = atoi(argv[i+1]); if(port_tmp<1 || port_tmp>65535){ log__printf(NULL, MOSQ_LOG_ERR, "Error: Invalid port specified (%d).", port_tmp); return MOSQ_ERR_INVAL; }else{ if(config->default_listener.port){ log__printf(NULL, MOSQ_LOG_WARNING, "Warning: Default listener port specified multiple times. Only the latest will be used."); } config->default_listener.port = port_tmp; } }else{ log__printf(NULL, MOSQ_LOG_ERR, "Error: -p argument given, but no port specified."); return MOSQ_ERR_INVAL; } i++; }else if(!strcmp(argv[i], "-v") || !strcmp(argv[i], "--verbose")){ db->verbose = true; }else{ fprintf(stderr, "Error: Unknown option '%s'.\n",argv[i]); print_usage(); return MOSQ_ERR_INVAL; } } if(config->listener_count == 0 #ifdef WITH_TLS || config->default_listener.cafile || config->default_listener.capath || config->default_listener.certfile || config->default_listener.keyfile || config->default_listener.ciphers || config->default_listener.psk_hint || config->default_listener.require_certificate || config->default_listener.crlfile || config->default_listener.use_identity_as_username || config->default_listener.use_subject_as_username #endif || config->default_listener.use_username_as_clientid || config->default_listener.host || config->default_listener.port || config->default_listener.max_connections != -1 || config->default_listener.mount_point || config->default_listener.protocol != mp_mqtt || config->default_listener.socket_domain || config->default_listener.security_options.password_file || config->default_listener.security_options.psk_file || config->default_listener.security_options.auth_plugin_config_count || config->default_listener.security_options.allow_anonymous != -1 ){ config->listener_count++; config->listeners = mosquitto__realloc(config->listeners, sizeof(struct mosquitto__listener)*config->listener_count); if(!config->listeners){ log__printf(NULL, MOSQ_LOG_ERR, "Error: Out of memory."); return MOSQ_ERR_NOMEM; } memset(&config->listeners[config->listener_count-1], 0, sizeof(struct mosquitto__listener)); if(config->default_listener.port){ config->listeners[config->listener_count-1].port = config->default_listener.port; }else{ config->listeners[config->listener_count-1].port = 1883; } if(config->default_listener.host){ config->listeners[config->listener_count-1].host = config->default_listener.host; }else{ config->listeners[config->listener_count-1].host = NULL; } if(config->default_listener.mount_point){ config->listeners[config->listener_count-1].mount_point = config->default_listener.mount_point; }else{ config->listeners[config->listener_count-1].mount_point = NULL; } config->listeners[config->listener_count-1].max_connections = config->default_listener.max_connections; 
config->listeners[config->listener_count-1].protocol = config->default_listener.protocol; config->listeners[config->listener_count-1].socket_domain = config->default_listener.socket_domain; config->listeners[config->listener_count-1].client_count = 0; config->listeners[config->listener_count-1].socks = NULL; config->listeners[config->listener_count-1].sock_count = 0; config->listeners[config->listener_count-1].client_count = 0; config->listeners[config->listener_count-1].use_username_as_clientid = config->default_listener.use_username_as_clientid; #ifdef WITH_TLS config->listeners[config->listener_count-1].tls_version = config->default_listener.tls_version; config->listeners[config->listener_count-1].cafile = config->default_listener.cafile; config->listeners[config->listener_count-1].capath = config->default_listener.capath; config->listeners[config->listener_count-1].certfile = config->default_listener.certfile; config->listeners[config->listener_count-1].keyfile = config->default_listener.keyfile; config->listeners[config->listener_count-1].ciphers = config->default_listener.ciphers; config->listeners[config->listener_count-1].psk_hint = config->default_listener.psk_hint; config->listeners[config->listener_count-1].require_certificate = config->default_listener.require_certificate; config->listeners[config->listener_count-1].ssl_ctx = NULL; config->listeners[config->listener_count-1].crlfile = config->default_listener.crlfile; config->listeners[config->listener_count-1].use_identity_as_username = config->default_listener.use_identity_as_username; config->listeners[config->listener_count-1].use_subject_as_username = config->default_listener.use_subject_as_username; #endif config->listeners[config->listener_count-1].security_options.acl_file = config->default_listener.security_options.acl_file; config->listeners[config->listener_count-1].security_options.password_file = config->default_listener.security_options.password_file; config->listeners[config->listener_count-1].security_options.psk_file = config->default_listener.security_options.psk_file; config->listeners[config->listener_count-1].security_options.auth_plugin_configs = config->default_listener.security_options.auth_plugin_configs; config->listeners[config->listener_count-1].security_options.auth_plugin_config_count = config->default_listener.security_options.auth_plugin_config_count; config->listeners[config->listener_count-1].security_options.allow_anonymous = config->default_listener.security_options.allow_anonymous; } /* Default to drop to mosquitto user if we are privileged and no user specified. */ if(!config->user){ config->user = "mosquitto"; } if(db->verbose){ config->log_type = INT_MAX; } return config__check(config); }
168,962
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, int sockaddr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; struct pppox_sock *po = pppox_sk(sk); struct pptp_opt *opt = &po->proto.pptp; struct rtable *rt; struct flowi4 fl4; int error = 0; if (sp->sa_protocol != PX_PROTO_PPTP) return -EINVAL; if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr)) return -EALREADY; lock_sock(sk); /* Check for already bound sockets */ if (sk->sk_state & PPPOX_CONNECTED) { error = -EBUSY; goto end; } /* Check for already disconnected sockets, on attempts to disconnect */ if (sk->sk_state & PPPOX_DEAD) { error = -EALREADY; goto end; } if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) { error = -EINVAL; goto end; } po->chan.private = sk; po->chan.ops = &pptp_chan_ops; rt = ip_route_output_ports(sock_net(sk), &fl4, sk, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, IPPROTO_GRE, RT_CONN_FLAGS(sk), 0); if (IS_ERR(rt)) { error = -EHOSTUNREACH; goto end; } sk_setup_caps(sk, &rt->dst); po->chan.mtu = dst_mtu(&rt->dst); if (!po->chan.mtu) po->chan.mtu = PPP_MRU; ip_rt_put(rt); po->chan.mtu -= PPTP_HEADER_OVERHEAD; po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header); error = ppp_register_channel(&po->chan); if (error) { pr_err("PPTP: failed to register PPP channel (%d)\n", error); goto end; } opt->dst_addr = sp->sa_addr.pptp; sk->sk_state = PPPOX_CONNECTED; end: release_sock(sk); return error; } Commit Message: pptp: verify sockaddr_len in pptp_bind() and pptp_connect() Reported-by: Dmitry Vyukov <[email protected]> Signed-off-by: Cong Wang <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-200
static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, int sockaddr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; struct pppox_sock *po = pppox_sk(sk); struct pptp_opt *opt = &po->proto.pptp; struct rtable *rt; struct flowi4 fl4; int error = 0; if (sockaddr_len < sizeof(struct sockaddr_pppox)) return -EINVAL; if (sp->sa_protocol != PX_PROTO_PPTP) return -EINVAL; if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr)) return -EALREADY; lock_sock(sk); /* Check for already bound sockets */ if (sk->sk_state & PPPOX_CONNECTED) { error = -EBUSY; goto end; } /* Check for already disconnected sockets, on attempts to disconnect */ if (sk->sk_state & PPPOX_DEAD) { error = -EALREADY; goto end; } if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) { error = -EINVAL; goto end; } po->chan.private = sk; po->chan.ops = &pptp_chan_ops; rt = ip_route_output_ports(sock_net(sk), &fl4, sk, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, IPPROTO_GRE, RT_CONN_FLAGS(sk), 0); if (IS_ERR(rt)) { error = -EHOSTUNREACH; goto end; } sk_setup_caps(sk, &rt->dst); po->chan.mtu = dst_mtu(&rt->dst); if (!po->chan.mtu) po->chan.mtu = PPP_MRU; ip_rt_put(rt); po->chan.mtu -= PPTP_HEADER_OVERHEAD; po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header); error = ppp_register_channel(&po->chan); if (error) { pr_err("PPTP: failed to register PPP channel (%d)\n", error); goto end; } opt->dst_addr = sp->sa_addr.pptp; sk->sk_state = PPPOX_CONNECTED; end: release_sock(sk); return error; }
166,561
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: copy_thread(unsigned long clone_flags, unsigned long stack_start, unsigned long stk_sz, struct task_struct *p) { struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->ARM_r0 = 0; if (stack_start) childregs->ARM_sp = stack_start; } else { memset(childregs, 0, sizeof(struct pt_regs)); thread->cpu_context.r4 = stk_sz; thread->cpu_context.r5 = stack_start; childregs->ARM_cpsr = SVC_MODE; } thread->cpu_context.pc = (unsigned long)ret_from_fork; thread->cpu_context.sp = (unsigned long)childregs; clear_ptrace_hw_breakpoint(p); if (clone_flags & CLONE_SETTLS) thread->tp_value = childregs->ARM_r3; thread_notify(THREAD_NOTIFY_COPY, thread); return 0; } Commit Message: ARM: 7735/2: Preserve the user r/w register TPIDRURW on context switch and fork Since commit 6a1c53124aa1 the user writeable TLS register was zeroed to prevent it from being used as a covert channel between two tasks. There are more and more applications coming to Windows RT, Wine could support them, but mostly they expect to have the thread environment block (TEB) in TPIDRURW. This patch preserves that register per thread instead of clearing it. Unlike the TPIDRURO, which is already switched, the TPIDRURW can be updated from userspace so needs careful treatment in the case that we modify TPIDRURW and call fork(). To avoid this we must always read TPIDRURW in copy_thread. Signed-off-by: André Hentschel <[email protected]> Signed-off-by: Will Deacon <[email protected]> Signed-off-by: Jonathan Austin <[email protected]> Signed-off-by: Russell King <[email protected]> CWE ID: CWE-264
copy_thread(unsigned long clone_flags, unsigned long stack_start, unsigned long stk_sz, struct task_struct *p) { struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->ARM_r0 = 0; if (stack_start) childregs->ARM_sp = stack_start; } else { memset(childregs, 0, sizeof(struct pt_regs)); thread->cpu_context.r4 = stk_sz; thread->cpu_context.r5 = stack_start; childregs->ARM_cpsr = SVC_MODE; } thread->cpu_context.pc = (unsigned long)ret_from_fork; thread->cpu_context.sp = (unsigned long)childregs; clear_ptrace_hw_breakpoint(p); if (clone_flags & CLONE_SETTLS) thread->tp_value[0] = childregs->ARM_r3; thread->tp_value[1] = get_tpuser(); thread_notify(THREAD_NOTIFY_COPY, thread); return 0; }
167,579
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void WebLocalFrameImpl::SetCommittedFirstRealLoad() { DCHECK(GetFrame()); GetFrame()->Loader().StateMachine()->AdvanceTo( FrameLoaderStateMachine::kCommittedMultipleRealLoads); GetFrame()->DidSendResourceTimingInfoToParent(); } Commit Message: Do not forward resource timing to parent frame after back-forward navigation LocalFrame has |should_send_resource_timing_info_to_parent_| flag not to send timing info to parent except for the first navigation. This flag is cleared when the first timing is sent to parent, however this does not happen if iframe's first navigation was by back-forward navigation. For such iframes, we shouldn't send timings to parent at all. Bug: 876822 Change-Id: I128b51a82ef278c439548afc8283ae63abdef5c5 Reviewed-on: https://chromium-review.googlesource.com/1186215 Reviewed-by: Kinuko Yasuda <[email protected]> Commit-Queue: Kunihiko Sakamoto <[email protected]> Cr-Commit-Position: refs/heads/master@{#585736} CWE ID: CWE-200
void WebLocalFrameImpl::SetCommittedFirstRealLoad() { DCHECK(GetFrame()); GetFrame()->Loader().StateMachine()->AdvanceTo( FrameLoaderStateMachine::kCommittedMultipleRealLoads); GetFrame()->SetShouldSendResourceTimingInfoToParent(false); }
172,655
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) { int i; static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = { [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, [DISCE_PROBE] = sas_probe_devices, [DISCE_SUSPEND] = sas_suspend_devices, [DISCE_RESUME] = sas_resume_devices, [DISCE_DESTRUCT] = sas_destruct_devices, }; disc->pending = 0; for (i = 0; i < DISC_NUM_EVENTS; i++) { INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]); disc->disc_work[i].port = port; } } Commit Message: scsi: libsas: direct call probe and destruct In commit 87c8331fcf72 ("[SCSI] libsas: prevent domain rediscovery competing with ata error handling") introduced disco mutex to prevent rediscovery competing with ata error handling and put the whole revalidation in the mutex. But the rphy add/remove needs to wait for the error handling which also grabs the disco mutex. This may leads to dead lock.So the probe and destruct event were introduce to do the rphy add/remove asynchronously and out of the lock. The asynchronously processed workers makes the whole discovery process not atomic, the other events may interrupt the process. For example, if a loss of signal event inserted before the probe event, the sas_deform_port() is called and the port will be deleted. And sas_port_delete() may run before the destruct event, but the port-x:x is the top parent of end device or expander. This leads to a kernel WARNING such as: [ 82.042979] sysfs group 'power' not found for kobject 'phy-1:0:22' [ 82.042983] ------------[ cut here ]------------ [ 82.042986] WARNING: CPU: 54 PID: 1714 at fs/sysfs/group.c:237 sysfs_remove_group+0x94/0xa0 [ 82.043059] Call trace: [ 82.043082] [<ffff0000082e7624>] sysfs_remove_group+0x94/0xa0 [ 82.043085] [<ffff00000864e320>] dpm_sysfs_remove+0x60/0x70 [ 82.043086] [<ffff00000863ee10>] device_del+0x138/0x308 [ 82.043089] [<ffff00000869a2d0>] sas_phy_delete+0x38/0x60 [ 82.043091] [<ffff00000869a86c>] do_sas_phy_delete+0x6c/0x80 [ 82.043093] [<ffff00000863dc20>] device_for_each_child+0x58/0xa0 [ 82.043095] [<ffff000008696f80>] sas_remove_children+0x40/0x50 [ 82.043100] [<ffff00000869d1bc>] sas_destruct_devices+0x64/0xa0 [ 82.043102] [<ffff0000080e93bc>] process_one_work+0x1fc/0x4b0 [ 82.043104] [<ffff0000080e96c0>] worker_thread+0x50/0x490 [ 82.043105] [<ffff0000080f0364>] kthread+0xfc/0x128 [ 82.043107] [<ffff0000080836c0>] ret_from_fork+0x10/0x50 Make probe and destruct a direct call in the disco and revalidate function, but put them outside the lock. The whole discovery or revalidate won't be interrupted by other events. And the DISCE_PROBE and DISCE_DESTRUCT event are deleted as a result of the direct call. Introduce a new list to destruct the sas_port and put the port delete after the destruct. This makes sure the right order of destroying the sysfs kobject and fix the warning above. In sas_ex_revalidate_domain() have a loop to find all broadcasted device, and sometimes we have a chance to find the same expander twice. Because the sas_port will be deleted at the end of the whole revalidate process, sas_port with the same name cannot be added before this. Otherwise the sysfs will complain of creating duplicate filename. Since the LLDD will send broadcast for every device change, we can only process one expander's revalidation. 
[mkp: kbuild test robot warning] Signed-off-by: Jason Yan <[email protected]> CC: John Garry <[email protected]> CC: Johannes Thumshirn <[email protected]> CC: Ewan Milne <[email protected]> CC: Christoph Hellwig <[email protected]> CC: Tomas Henzl <[email protected]> CC: Dan Williams <[email protected]> Reviewed-by: Hannes Reinecke <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]> CWE ID:
void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) { int i; static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = { [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, [DISCE_SUSPEND] = sas_suspend_devices, [DISCE_RESUME] = sas_resume_devices, }; disc->pending = 0; for (i = 0; i < DISC_NUM_EVENTS; i++) { INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]); disc->disc_work[i].port = port; } }
169,387
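The libsas commit message above boils down to one pattern: decide what must be torn down while the disco mutex is held, but perform the teardown as a direct call after the lock is released, so it can neither deadlock against error handling nor be interleaved with other queued events. Below is a minimal userspace sketch of that pattern using pthreads; the device list, field names, and revalidate() are illustrative stand-ins, not libsas code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct device {
    struct device *next;
    int gone;                        /* set when the device has disappeared */
};

static pthread_mutex_t disco_lock = PTHREAD_MUTEX_INITIALIZER;
static struct device *dev_list;

/* Collect doomed devices under the lock, destroy them directly after
 * unlocking: teardown stays ordered and never runs inside the mutex. */
static void revalidate(void)
{
    struct device *destroy = NULL, **pp, *d;

    pthread_mutex_lock(&disco_lock);
    for (pp = &dev_list; (d = *pp) != NULL; ) {
        if (d->gone) {
            *pp = d->next;           /* unlink while protected */
            d->next = destroy;
            destroy = d;
        } else {
            pp = &d->next;
        }
    }
    pthread_mutex_unlock(&disco_lock);

    while ((d = destroy) != NULL) {  /* direct call, outside the lock */
        destroy = d->next;
        printf("destruct %p\n", (void *)d);
        free(d);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct device *d = calloc(1, sizeof(*d));
        if (!d)
            return 1;
        d->gone = (i == 1);
        d->next = dev_list;
        dev_list = d;
    }
    revalidate();
    return 0;
}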
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: irc_server_gnutls_callback (void *data, gnutls_session_t tls_session, const gnutls_datum_t *req_ca, int nreq, const gnutls_pk_algorithm_t *pk_algos, int pk_algos_len, gnutls_retr_st *answer) { struct t_irc_server *server; gnutls_retr_st tls_struct; const gnutls_datum_t *cert_list; gnutls_datum_t filedatum; unsigned int cert_list_len, status; time_t cert_time; char *cert_path0, *cert_path1, *cert_path2, *cert_str, *hostname; const char *weechat_dir; int rc, ret, i, j, hostname_match; #if LIBGNUTLS_VERSION_NUMBER >= 0x010706 gnutls_datum_t cinfo; int rinfo; #endif /* make C compiler happy */ (void) req_ca; (void) nreq; (void) pk_algos; (void) pk_algos_len; rc = 0; if (!data) return -1; server = (struct t_irc_server *) data; hostname = server->current_address; hostname = server->current_address; hostname_match = 0; weechat_printf (server->buffer, _("gnutls: connected using %d-bit Diffie-Hellman shared " "secret exchange"), IRC_SERVER_OPTION_INTEGER (server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE)); if (gnutls_certificate_verify_peers2 (tls_session, &status) < 0) { weechat_printf (server->buffer, _("%sgnutls: error while checking peer's certificate"), weechat_prefix ("error")); rc = -1; } else { /* some checks */ if (status & GNUTLS_CERT_INVALID) { weechat_printf (server->buffer, _("%sgnutls: peer's certificate is NOT trusted"), weechat_prefix ("error")); rc = -1; } else { weechat_printf (server->buffer, _("gnutls: peer's certificate is trusted")); } if (status & GNUTLS_CERT_SIGNER_NOT_FOUND) { weechat_printf (server->buffer, _("%sgnutls: peer's certificate issuer is unknown"), weechat_prefix ("error")); rc = -1; } if (status & GNUTLS_CERT_REVOKED) { weechat_printf (server->buffer, _("%sgnutls: the certificate has been revoked"), weechat_prefix ("error")); rc = -1; } /* check certificates */ if (gnutls_x509_crt_init (&cert_temp) >= 0) { cert_list = gnutls_certificate_get_peers (tls_session, &cert_list_len); if (cert_list) { weechat_printf (server->buffer, NG_("gnutls: receiving %d certificate", "gnutls: receiving %d certificates", cert_list_len), cert_list_len); for (i = 0, j = (int) cert_list_len; i < j; i++) { if (gnutls_x509_crt_import (cert_temp, &cert_list[i], GNUTLS_X509_FMT_DER) >= 0) { /* checking if hostname matches in the first certificate */ if (i == 0 && gnutls_x509_crt_check_hostname (cert_temp, hostname) != 0) { hostname_match = 1; } #if LIBGNUTLS_VERSION_NUMBER >= 0x010706 /* displaying infos about certificate */ #if LIBGNUTLS_VERSION_NUMBER < 0x020400 rinfo = gnutls_x509_crt_print (cert_temp, GNUTLS_X509_CRT_ONELINE, &cinfo); #else rinfo = gnutls_x509_crt_print (cert_temp, GNUTLS_CRT_PRINT_ONELINE, &cinfo); #endif if (rinfo == 0) { weechat_printf (server->buffer, _(" - certificate[%d] info:"), i + 1); weechat_printf (server->buffer, " - %s", cinfo.data); gnutls_free (cinfo.data); } #endif /* check expiration date */ cert_time = gnutls_x509_crt_get_expiration_time (cert_temp); if (cert_time < time(NULL)) { weechat_printf (server->buffer, _("%sgnutls: certificate has expired"), weechat_prefix ("error")); rc = -1; } /* check expiration date */ cert_time = gnutls_x509_crt_get_activation_time (cert_temp); if (cert_time > time(NULL)) { weechat_printf (server->buffer, _("%sgnutls: certificate is not yet activated"), weechat_prefix ("error")); rc = -1; } } } if (hostname_match == 0) { weechat_printf (server->buffer, _("%sgnutls: the hostname in the " "certificate does NOT match \"%s\""), weechat_prefix ("error"), hostname); rc = -1; } } } } /* using client certificate if it exists */ 
cert_path0 = (char *) IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_CERT); if (cert_path0 && cert_path0[0]) { weechat_dir = weechat_info_get ("weechat_dir", ""); cert_path1 = weechat_string_replace (cert_path0, "%h", weechat_dir); cert_path2 = (cert_path1) ? weechat_string_expand_home (cert_path1) : NULL; if (cert_path2) { cert_str = weechat_file_get_content (cert_path2); if (cert_str) { weechat_printf (server->buffer, _("gnutls: sending one certificate")); filedatum.data = (unsigned char *) cert_str; filedatum.size = strlen (cert_str); /* certificate */ gnutls_x509_crt_init (&server->tls_cert); gnutls_x509_crt_import (server->tls_cert, &filedatum, GNUTLS_X509_FMT_PEM); /* key */ gnutls_x509_privkey_init (&server->tls_cert_key); ret = gnutls_x509_privkey_import (server->tls_cert_key, &filedatum, GNUTLS_X509_FMT_PEM); if (ret < 0) { ret = gnutls_x509_privkey_import_pkcs8 (server->tls_cert_key, &filedatum, GNUTLS_X509_FMT_PEM, NULL, GNUTLS_PKCS_PLAIN); } if (ret < 0) { weechat_printf (server->buffer, _("%sgnutls: invalid certificate \"%s\", " "error: %s"), weechat_prefix ("error"), cert_path2, gnutls_strerror (ret)); rc = -1; } else { tls_struct.type = GNUTLS_CRT_X509; tls_struct.ncerts = 1; tls_struct.deinit_all = 0; tls_struct.cert.x509 = &server->tls_cert; tls_struct.key.x509 = server->tls_cert_key; #if LIBGNUTLS_VERSION_NUMBER >= 0x010706 /* client certificate info */ #if LIBGNUTLS_VERSION_NUMBER < 0x020400 rinfo = gnutls_x509_crt_print (server->tls_cert, GNUTLS_X509_CRT_ONELINE, &cinfo); #else rinfo = gnutls_x509_crt_print (server->tls_cert, GNUTLS_CRT_PRINT_ONELINE, &cinfo); #endif if (rinfo == 0) { weechat_printf (server->buffer, _(" - client certificate info (%s):"), cert_path2); weechat_printf (server->buffer, " - %s", cinfo.data); gnutls_free (cinfo.data); } #endif memcpy (answer, &tls_struct, sizeof (gnutls_retr_st)); free (cert_str); } } else { weechat_printf (server->buffer, _("%sgnutls: unable to read certifcate \"%s\""), weechat_prefix ("error"), cert_path2); } } if (cert_path1) free (cert_path1); if (cert_path2) free (cert_path2); } /* an error should stop the handshake unless the user doesn't care */ return rc; } Commit Message: CWE ID: CWE-20
irc_server_gnutls_callback (void *data, gnutls_session_t tls_session, const gnutls_datum_t *req_ca, int nreq, const gnutls_pk_algorithm_t *pk_algos, int pk_algos_len, gnutls_retr_st *answer, int action) { struct t_irc_server *server; gnutls_retr_st tls_struct; const gnutls_datum_t *cert_list; gnutls_datum_t filedatum; unsigned int cert_list_len, status; time_t cert_time; char *cert_path0, *cert_path1, *cert_path2, *cert_str, *hostname; const char *weechat_dir; int rc, ret, i, j, hostname_match; #if LIBGNUTLS_VERSION_NUMBER >= 0x010706 gnutls_datum_t cinfo; int rinfo; #endif /* make C compiler happy */ (void) req_ca; (void) nreq; (void) pk_algos; (void) pk_algos_len; rc = 0; if (!data) return -1; server = (struct t_irc_server *) data; hostname = server->current_address; hostname = server->current_address; hostname_match = 0; if (action == WEECHAT_HOOK_CONNECT_GNUTLS_CB_VERIFY_CERT) { weechat_printf (server->buffer, _("gnutls: connected using %d-bit Diffie-Hellman shared " "secret exchange"), IRC_SERVER_OPTION_INTEGER (server, IRC_SERVER_OPTION_SSL_DHKEY_SIZE)); if (gnutls_certificate_verify_peers2 (tls_session, &status) < 0) { weechat_printf (server->buffer, _("%sgnutls: error while checking peer's certificate"), weechat_prefix ("error")); rc = -1; } else { /* some checks */ if (status & GNUTLS_CERT_INVALID) { weechat_printf (server->buffer, _("%sgnutls: peer's certificate is NOT trusted"), weechat_prefix ("error")); rc = -1; } else { weechat_printf (server->buffer, _("gnutls: peer's certificate is trusted")); } if (status & GNUTLS_CERT_SIGNER_NOT_FOUND) { weechat_printf (server->buffer, _("%sgnutls: peer's certificate issuer is unknown"), weechat_prefix ("error")); rc = -1; } if (status & GNUTLS_CERT_REVOKED) { weechat_printf (server->buffer, _("%sgnutls: the certificate has been revoked"), weechat_prefix ("error")); rc = -1; } /* check certificates */ if (gnutls_x509_crt_init (&cert_temp) >= 0) { cert_list = gnutls_certificate_get_peers (tls_session, &cert_list_len); if (cert_list) { weechat_printf (server->buffer, NG_("gnutls: receiving %d certificate", "gnutls: receiving %d certificates", cert_list_len), cert_list_len); for (i = 0, j = (int) cert_list_len; i < j; i++) { if (gnutls_x509_crt_import (cert_temp, &cert_list[i], GNUTLS_X509_FMT_DER) >= 0) { /* checking if hostname matches in the first certificate */ if ((i == 0) && (gnutls_x509_crt_check_hostname (cert_temp, hostname) != 0)) { hostname_match = 1; } #if LIBGNUTLS_VERSION_NUMBER >= 0x010706 /* displaying infos about certificate */ #if LIBGNUTLS_VERSION_NUMBER < 0x020400 rinfo = gnutls_x509_crt_print (cert_temp, GNUTLS_X509_CRT_ONELINE, &cinfo); #else rinfo = gnutls_x509_crt_print (cert_temp, GNUTLS_CRT_PRINT_ONELINE, &cinfo); #endif if (rinfo == 0) { weechat_printf (server->buffer, _(" - certificate[%d] info:"), i + 1); weechat_printf (server->buffer, " - %s", cinfo.data); gnutls_free (cinfo.data); } #endif /* check expiration date */ cert_time = gnutls_x509_crt_get_expiration_time (cert_temp); if (cert_time < time (NULL)) { weechat_printf (server->buffer, _("%sgnutls: certificate has expired"), weechat_prefix ("error")); rc = -1; } /* check activation date */ cert_time = gnutls_x509_crt_get_activation_time (cert_temp); if (cert_time > time (NULL)) { weechat_printf (server->buffer, _("%sgnutls: certificate is not yet activated"), weechat_prefix ("error")); rc = -1; } } } if (hostname_match == 0) { weechat_printf (server->buffer, _("%sgnutls: the hostname in the " "certificate does NOT match \"%s\""), weechat_prefix ("error"), 
hostname); rc = -1; } } } } } else if (action == WEECHAT_HOOK_CONNECT_GNUTLS_CB_SET_CERT) { /* using client certificate if it exists */ cert_path0 = (char *) IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SSL_CERT); if (cert_path0 && cert_path0[0]) { weechat_dir = weechat_info_get ("weechat_dir", ""); cert_path1 = weechat_string_replace (cert_path0, "%h", weechat_dir); cert_path2 = (cert_path1) ? weechat_string_expand_home (cert_path1) : NULL; if (cert_path2) { cert_str = weechat_file_get_content (cert_path2); if (cert_str) { weechat_printf (server->buffer, _("gnutls: sending one certificate")); filedatum.data = (unsigned char *) cert_str; filedatum.size = strlen (cert_str); /* certificate */ gnutls_x509_crt_init (&server->tls_cert); gnutls_x509_crt_import (server->tls_cert, &filedatum, GNUTLS_X509_FMT_PEM); /* key */ gnutls_x509_privkey_init (&server->tls_cert_key); ret = gnutls_x509_privkey_import (server->tls_cert_key, &filedatum, GNUTLS_X509_FMT_PEM); if (ret < 0) { ret = gnutls_x509_privkey_import_pkcs8 (server->tls_cert_key, &filedatum, GNUTLS_X509_FMT_PEM, NULL, GNUTLS_PKCS_PLAIN); } if (ret < 0) { weechat_printf (server->buffer, _("%sgnutls: invalid certificate \"%s\", " "error: %s"), weechat_prefix ("error"), cert_path2, gnutls_strerror (ret)); rc = -1; } else { tls_struct.type = GNUTLS_CRT_X509; tls_struct.ncerts = 1; tls_struct.deinit_all = 0; tls_struct.cert.x509 = &server->tls_cert; tls_struct.key.x509 = server->tls_cert_key; #if LIBGNUTLS_VERSION_NUMBER >= 0x010706 /* client certificate info */ #if LIBGNUTLS_VERSION_NUMBER < 0x020400 rinfo = gnutls_x509_crt_print (server->tls_cert, GNUTLS_X509_CRT_ONELINE, &cinfo); #else rinfo = gnutls_x509_crt_print (server->tls_cert, GNUTLS_CRT_PRINT_ONELINE, &cinfo); #endif if (rinfo == 0) { weechat_printf (server->buffer, _(" - client certificate info (%s):"), cert_path2); weechat_printf (server->buffer, " - %s", cinfo.data); gnutls_free (cinfo.data); } #endif memcpy (answer, &tls_struct, sizeof (gnutls_retr_st)); free (cert_str); } } else { weechat_printf (server->buffer, _("%sgnutls: unable to read certifcate \"%s\""), weechat_prefix ("error"), cert_path2); } } if (cert_path1) free (cert_path1); if (cert_path2) free (cert_path2); } } /* an error should stop the handshake unless the user doesn't care */ return rc; }
164,713
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: StateBase* writeBlob(v8::Handle<v8::Value> value, StateBase* next) { Blob* blob = V8Blob::toNative(value.As<v8::Object>()); if (!blob) return 0; if (blob->hasBeenClosed()) return handleError(DataCloneError, "A Blob object has been closed, and could therefore not be cloned.", next); int blobIndex = -1; m_blobDataHandles.add(blob->uuid(), blob->blobDataHandle()); if (appendBlobInfo(blob->uuid(), blob->type(), blob->size(), &blobIndex)) m_writer.writeBlobIndex(blobIndex); else m_writer.writeBlob(blob->uuid(), blob->type(), blob->size()); return 0; } Commit Message: Replace further questionable HashMap::add usages in bindings BUG=390928 [email protected] Review URL: https://codereview.chromium.org/411273002 git-svn-id: svn://svn.chromium.org/blink/trunk@178823 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID:
StateBase* writeBlob(v8::Handle<v8::Value> value, StateBase* next) { Blob* blob = V8Blob::toNative(value.As<v8::Object>()); if (!blob) return 0; if (blob->hasBeenClosed()) return handleError(DataCloneError, "A Blob object has been closed, and could therefore not be cloned.", next); int blobIndex = -1; m_blobDataHandles.set(blob->uuid(), blob->blobDataHandle()); if (appendBlobInfo(blob->uuid(), blob->type(), blob->size(), &blobIndex)) m_writer.writeBlobIndex(blobIndex); else m_writer.writeBlob(blob->uuid(), blob->type(), blob->size()); return 0; }
171,650
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sock *sk = sock->sk; struct sockaddr_l2 la; int len, err = 0; BT_DBG("sk %p", sk); if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&la, 0, sizeof(la)); len = min_t(unsigned int, sizeof(la), alen); memcpy(&la, addr, len); if (la.l2_cid) return -EINVAL; lock_sock(sk); if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) { err = -EINVAL; goto done; } switch (l2cap_pi(sk)->mode) { case L2CAP_MODE_BASIC: break; case L2CAP_MODE_ERTM: if (enable_ertm) break; /* fall through */ default: err = -ENOTSUPP; goto done; } switch (sk->sk_state) { case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: /* Already connecting */ goto wait; case BT_CONNECTED: /* Already connected */ goto done; case BT_OPEN: case BT_BOUND: /* Can connect */ break; default: err = -EBADFD; goto done; } /* Set destination address and psm */ bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr); l2cap_pi(sk)->psm = la.l2_psm; err = l2cap_do_connect(sk); if (err) goto done; wait: err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); return err; } Commit Message: Bluetooth: Add configuration support for ERTM and Streaming mode Add support to config_req and config_rsp to configure ERTM and Streaming mode. If the remote device specifies ERTM or Streaming mode, then the same mode is proposed. Otherwise ERTM or Basic mode is used. And in case of a state 2 device, the remote device should propose the same mode. If not, then the channel gets disconnected. Signed-off-by: Gustavo F. Padovan <[email protected]> Signed-off-by: Marcel Holtmann <[email protected]> CWE ID: CWE-119
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sock *sk = sock->sk; struct sockaddr_l2 la; int len, err = 0; BT_DBG("sk %p", sk); if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&la, 0, sizeof(la)); len = min_t(unsigned int, sizeof(la), alen); memcpy(&la, addr, len); if (la.l2_cid) return -EINVAL; lock_sock(sk); if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) { err = -EINVAL; goto done; } switch (l2cap_pi(sk)->mode) { case L2CAP_MODE_BASIC: break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (enable_ertm) break; /* fall through */ default: err = -ENOTSUPP; goto done; } switch (sk->sk_state) { case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: /* Already connecting */ goto wait; case BT_CONNECTED: /* Already connected */ goto done; case BT_OPEN: case BT_BOUND: /* Can connect */ break; default: err = -EBADFD; goto done; } /* Set destination address and psm */ bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr); l2cap_pi(sk)->psm = la.l2_psm; err = l2cap_do_connect(sk); if (err) goto done; wait: err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); return err; }
167,626
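The behaviour described in the commit message above (mirror ERTM or Streaming if the peer proposes it and the local side allows it, otherwise fall back to ERTM or Basic) can be expressed as a small selection function. This is a hypothetical sketch of that negotiation rule only, not the actual L2CAP config-request handling.

#include <stdio.h>

enum l2mode { MODE_BASIC, MODE_ERTM, MODE_STREAMING };

/* Mirror the remote's ERTM/Streaming proposal when enabled; otherwise
 * fall back to ERTM if supported, or plain Basic mode. */
static enum l2mode choose_mode(enum l2mode remote, int enable_ertm)
{
    switch (remote) {
    case MODE_ERTM:
    case MODE_STREAMING:
        if (enable_ertm)
            return remote;
        /* fall through */
    default:
        return enable_ertm ? MODE_ERTM : MODE_BASIC;
    }
}

int main(void)
{
    printf("%d %d %d\n",
           choose_mode(MODE_STREAMING, 1),   /* 2: mirror streaming   */
           choose_mode(MODE_ERTM, 0),        /* 0: fall back to basic */
           choose_mode(MODE_BASIC, 1));      /* 1: propose ERTM       */
    return 0;
}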
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void CheckClientDownloadRequest::UploadBinary( DownloadCheckResult result, DownloadCheckResultReason reason) { saved_result_ = result; saved_reason_ = reason; bool upload_for_dlp = ShouldUploadForDlpScan(); bool upload_for_malware = ShouldUploadForMalwareScan(reason); auto request = std::make_unique<DownloadItemRequest>( item_, /*read_immediately=*/true, base::BindOnce(&CheckClientDownloadRequest::OnDeepScanningComplete, weakptr_factory_.GetWeakPtr())); Profile* profile = Profile::FromBrowserContext(GetBrowserContext()); if (upload_for_dlp) { DlpDeepScanningClientRequest dlp_request; dlp_request.set_content_source(DlpDeepScanningClientRequest::FILE_DOWNLOAD); request->set_request_dlp_scan(std::move(dlp_request)); } if (upload_for_malware) { MalwareDeepScanningClientRequest malware_request; malware_request.set_population( MalwareDeepScanningClientRequest::POPULATION_ENTERPRISE); malware_request.set_download_token( DownloadProtectionService::GetDownloadPingToken(item_)); request->set_request_malware_scan(std::move(malware_request)); } request->set_dm_token( policy::BrowserDMTokenStorage::Get()->RetrieveDMToken()); service()->UploadForDeepScanning(profile, std::move(request)); } Commit Message: Migrate download_protection code to new DM token class. Migrates RetrieveDMToken calls to use the new BrowserDMToken class. Bug: 1020296 Change-Id: Icef580e243430d73b6c1c42b273a8540277481d9 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1904234 Commit-Queue: Dominique Fauteux-Chapleau <[email protected]> Reviewed-by: Tien Mai <[email protected]> Reviewed-by: Daniel Rubery <[email protected]> Cr-Commit-Position: refs/heads/master@{#714196} CWE ID: CWE-20
void CheckClientDownloadRequest::UploadBinary( DownloadCheckResult result, DownloadCheckResultReason reason) { saved_result_ = result; saved_reason_ = reason; bool upload_for_dlp = ShouldUploadForDlpScan(); bool upload_for_malware = ShouldUploadForMalwareScan(reason); auto request = std::make_unique<DownloadItemRequest>( item_, /*read_immediately=*/true, base::BindOnce(&CheckClientDownloadRequest::OnDeepScanningComplete, weakptr_factory_.GetWeakPtr())); Profile* profile = Profile::FromBrowserContext(GetBrowserContext()); if (upload_for_dlp) { DlpDeepScanningClientRequest dlp_request; dlp_request.set_content_source(DlpDeepScanningClientRequest::FILE_DOWNLOAD); request->set_request_dlp_scan(std::move(dlp_request)); } if (upload_for_malware) { MalwareDeepScanningClientRequest malware_request; malware_request.set_population( MalwareDeepScanningClientRequest::POPULATION_ENTERPRISE); malware_request.set_download_token( DownloadProtectionService::GetDownloadPingToken(item_)); request->set_request_malware_scan(std::move(malware_request)); } auto dm_token = BrowserDMTokenStorage::Get()->RetrieveBrowserDMToken(); DCHECK(dm_token.is_valid()); request->set_dm_token(dm_token.value()); service()->UploadForDeepScanning(profile, std::move(request)); }
172,358
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int read_uids_guids(long long *table_start) { int res, i; int bytes = SQUASHFS_ID_BYTES(sBlk.s.no_ids); int indexes = SQUASHFS_ID_BLOCKS(sBlk.s.no_ids); long long id_index_table[indexes]; TRACE("read_uids_guids: no_ids %d\n", sBlk.s.no_ids); id_table = malloc(bytes); if(id_table == NULL) { ERROR("read_uids_guids: failed to allocate id table\n"); return FALSE; } res = read_fs_bytes(fd, sBlk.s.id_table_start, SQUASHFS_ID_BLOCK_BYTES(sBlk.s.no_ids), id_index_table); if(res == FALSE) { ERROR("read_uids_guids: failed to read id index table\n"); return FALSE; } SQUASHFS_INSWAP_ID_BLOCKS(id_index_table, indexes); /* * id_index_table[0] stores the start of the compressed id blocks. * This by definition is also the end of the previous filesystem * table - this may be the exports table if it is present, or the * fragments table if it isn't. */ *table_start = id_index_table[0]; for(i = 0; i < indexes; i++) { int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE : bytes & (SQUASHFS_METADATA_SIZE - 1); res = read_block(fd, id_index_table[i], NULL, expected, ((char *) id_table) + i * SQUASHFS_METADATA_SIZE); if(res == FALSE) { ERROR("read_uids_guids: failed to read id table block" "\n"); return FALSE; } } SQUASHFS_INSWAP_INTS(id_table, sBlk.s.no_ids); return TRUE; } Commit Message: unsquashfs-4: Add more sanity checks + fix CVE-2015-4645/6 Add more filesystem table sanity checks to Unsquashfs-4 and also properly fix CVE-2015-4645 and CVE-2015-4646. The CVEs were raised due to Unsquashfs having variable oveflow and stack overflow in a number of vulnerable functions. The suggested patch only "fixed" one such function and fixed it badly, and so it was buggy and introduced extra bugs! The suggested patch was not only buggy, but, it used the essentially wrong approach too. It was "fixing" the symptom but not the cause. The symptom is wrong values causing overflow, the cause is filesystem corruption. This corruption should be detected and the filesystem rejected *before* trying to allocate memory. This patch applies the following fixes: 1. The filesystem super-block tables are checked, and the values must match across the filesystem. This will trap corrupted filesystems created by Mksquashfs. 2. The maximum (theorectical) size the filesystem tables could grow to, were analysed, and some variables were increased from int to long long. This analysis has been added as comments. 3. Stack allocation was removed, and a shared buffer (which is checked and increased as necessary) is used to read the table indexes. Signed-off-by: Phillip Lougher <[email protected]> CWE ID: CWE-190
static int read_id_table(long long *table_start) { /* * Note on overflow limits: * Size of SBlk.s.no_ids is 2^16 (unsigned short) * Max size of bytes is 2^16*4 or 256K * Max indexes is (2^16*4)/8K or 32 * Max length is ((2^16*4)/8K)*8 or 256 */ int res, i; int bytes = SQUASHFS_ID_BYTES(sBlk.s.no_ids); int indexes = SQUASHFS_ID_BLOCKS(sBlk.s.no_ids); int length = SQUASHFS_ID_BLOCK_BYTES(sBlk.s.no_ids); long long *id_index_table; /* * The size of the index table (length bytes) should match the * table start and end points */ if(length != (*table_start - sBlk.s.id_table_start)) { ERROR("read_id_table: Bad id count in super block\n"); return FALSE; } TRACE("read_id_table: no_ids %d\n", sBlk.s.no_ids); id_index_table = alloc_index_table(indexes); id_table = malloc(bytes); if(id_table == NULL) { ERROR("read_id_table: failed to allocate id table\n"); return FALSE; } res = read_fs_bytes(fd, sBlk.s.id_table_start, length, id_index_table); if(res == FALSE) { ERROR("read_id_table: failed to read id index table\n"); return FALSE; } SQUASHFS_INSWAP_ID_BLOCKS(id_index_table, indexes); /* * id_index_table[0] stores the start of the compressed id blocks. * This by definition is also the end of the previous filesystem * table - this may be the exports table if it is present, or the * fragments table if it isn't. */ *table_start = id_index_table[0]; for(i = 0; i < indexes; i++) { int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE : bytes & (SQUASHFS_METADATA_SIZE - 1); res = read_block(fd, id_index_table[i], NULL, expected, ((char *) id_table) + i * SQUASHFS_METADATA_SIZE); if(res == FALSE) { ERROR("read_id_table: failed to read id table block" "\n"); return FALSE; } } SQUASHFS_INSWAP_INTS(id_table, sBlk.s.no_ids); return TRUE; }
168,882
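The central idea of the fixed read_id_table() above is that corruption is detected before any allocation: the table length implied by no_ids must equal the distance between the recorded index-table start and the end of the preceding table. A standalone sketch of that consistency check follows; the struct, constants, and boundary argument are simplified stand-ins, not the real squashfs superblock layout.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define METADATA_SIZE 8192LL   /* bytes of ids packed per metadata block */

struct superblock {
    uint16_t  no_ids;          /* at most 2^16 ids, 4 bytes each */
    long long id_table_start;  /* offset of the id index table   */
};

/* Allocate the id index table only after proving that the length implied
 * by no_ids matches the on-disk gap up to 'table_end'. */
static unsigned char *read_id_index(const struct superblock *s,
                                    long long table_end)
{
    long long bytes   = (long long)s->no_ids * 4;
    long long indexes = (bytes + METADATA_SIZE - 1) / METADATA_SIZE;
    long long length  = indexes * 8;             /* 8-byte index entries */

    if (s->no_ids == 0 || length != table_end - s->id_table_start) {
        fprintf(stderr, "bad id count in super block\n");
        return NULL;
    }
    return malloc((size_t)length);
}

int main(void)
{
    struct superblock s = { .no_ids = 4, .id_table_start = 1000 };

    unsigned char *ok  = read_id_index(&s, 1008);  /* 1 entry * 8 bytes   */
    unsigned char *bad = read_id_index(&s, 4096);  /* inconsistent layout */
    printf("%s / %s\n", ok ? "accepted" : "rejected",
                        bad ? "accepted" : "rejected");
    free(ok);
    free(bad);
    return 0;
}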
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: predicate_parse(const char *str, int nr_parens, int nr_preds, parse_pred_fn parse_pred, void *data, struct filter_parse_error *pe) { struct prog_entry *prog_stack; struct prog_entry *prog; const char *ptr = str; char *inverts = NULL; int *op_stack; int *top; int invert = 0; int ret = -ENOMEM; int len; int N = 0; int i; nr_preds += 2; /* For TRUE and FALSE */ op_stack = kmalloc_array(nr_parens, sizeof(*op_stack), GFP_KERNEL); if (!op_stack) return ERR_PTR(-ENOMEM); prog_stack = kmalloc_array(nr_preds, sizeof(*prog_stack), GFP_KERNEL); if (!prog_stack) { parse_error(pe, -ENOMEM, 0); goto out_free; } inverts = kmalloc_array(nr_preds, sizeof(*inverts), GFP_KERNEL); if (!inverts) { parse_error(pe, -ENOMEM, 0); goto out_free; } top = op_stack; prog = prog_stack; *top = 0; /* First pass */ while (*ptr) { /* #1 */ const char *next = ptr++; if (isspace(*next)) continue; switch (*next) { case '(': /* #2 */ if (top - op_stack > nr_parens) return ERR_PTR(-EINVAL); *(++top) = invert; continue; case '!': /* #3 */ if (!is_not(next)) break; invert = !invert; continue; } if (N >= nr_preds) { parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); goto out_free; } inverts[N] = invert; /* #4 */ prog[N].target = N-1; len = parse_pred(next, data, ptr - str, pe, &prog[N].pred); if (len < 0) { ret = len; goto out_free; } ptr = next + len; N++; ret = -1; while (1) { /* #5 */ next = ptr++; if (isspace(*next)) continue; switch (*next) { case ')': case '\0': break; case '&': case '|': if (next[1] == next[0]) { ptr++; break; } default: parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); goto out_free; } invert = *top & INVERT; if (*top & PROCESS_AND) { /* #7 */ update_preds(prog, N - 1, invert); *top &= ~PROCESS_AND; } if (*next == '&') { /* #8 */ *top |= PROCESS_AND; break; } if (*top & PROCESS_OR) { /* #9 */ update_preds(prog, N - 1, !invert); *top &= ~PROCESS_OR; } if (*next == '|') { /* #10 */ *top |= PROCESS_OR; break; } if (!*next) /* #11 */ goto out; if (top == op_stack) { ret = -1; /* Too few '(' */ parse_error(pe, FILT_ERR_TOO_MANY_CLOSE, ptr - str); goto out_free; } top--; /* #12 */ } } out: if (top != op_stack) { /* Too many '(' */ parse_error(pe, FILT_ERR_TOO_MANY_OPEN, ptr - str); goto out_free; } prog[N].pred = NULL; /* #13 */ prog[N].target = 1; /* TRUE */ prog[N+1].pred = NULL; prog[N+1].target = 0; /* FALSE */ prog[N-1].target = N; prog[N-1].when_to_branch = false; /* Second Pass */ for (i = N-1 ; i--; ) { int target = prog[i].target; if (prog[i].when_to_branch == prog[target].when_to_branch) prog[i].target = prog[target].target; } /* Third Pass */ for (i = 0; i < N; i++) { invert = inverts[i] ^ prog[i].when_to_branch; prog[i].when_to_branch = invert; /* Make sure the program always moves forward */ if (WARN_ON(prog[i].target <= i)) { ret = -EINVAL; goto out_free; } } return prog; out_free: kfree(op_stack); kfree(prog_stack); kfree(inverts); return ERR_PTR(ret); } Commit Message: Merge tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace Pull tracing fixes from Steven Rostedt: "This contains a few fixes and a clean up. 
- a bad merge caused an "endif" to go in the wrong place in scripts/Makefile.build - softirq tracing fix for tracing that corrupts lockdep and causes a false splat - histogram documentation typo fixes - fix a bad memory reference when passing in no filter to the filter code - simplify code by using the swap macro instead of open coding the swap" * tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: tracing: Fix SKIP_STACK_VALIDATION=1 build due to bad merge with -mrecord-mcount tracing: Fix some errors in histogram documentation tracing: Use swap macro in update_max_tr softirq: Reorder trace_softirqs_on to prevent lockdep splat tracing: Check for no filter when processing event filters CWE ID: CWE-787
predicate_parse(const char *str, int nr_parens, int nr_preds, parse_pred_fn parse_pred, void *data, struct filter_parse_error *pe) { struct prog_entry *prog_stack; struct prog_entry *prog; const char *ptr = str; char *inverts = NULL; int *op_stack; int *top; int invert = 0; int ret = -ENOMEM; int len; int N = 0; int i; nr_preds += 2; /* For TRUE and FALSE */ op_stack = kmalloc_array(nr_parens, sizeof(*op_stack), GFP_KERNEL); if (!op_stack) return ERR_PTR(-ENOMEM); prog_stack = kmalloc_array(nr_preds, sizeof(*prog_stack), GFP_KERNEL); if (!prog_stack) { parse_error(pe, -ENOMEM, 0); goto out_free; } inverts = kmalloc_array(nr_preds, sizeof(*inverts), GFP_KERNEL); if (!inverts) { parse_error(pe, -ENOMEM, 0); goto out_free; } top = op_stack; prog = prog_stack; *top = 0; /* First pass */ while (*ptr) { /* #1 */ const char *next = ptr++; if (isspace(*next)) continue; switch (*next) { case '(': /* #2 */ if (top - op_stack > nr_parens) return ERR_PTR(-EINVAL); *(++top) = invert; continue; case '!': /* #3 */ if (!is_not(next)) break; invert = !invert; continue; } if (N >= nr_preds) { parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); goto out_free; } inverts[N] = invert; /* #4 */ prog[N].target = N-1; len = parse_pred(next, data, ptr - str, pe, &prog[N].pred); if (len < 0) { ret = len; goto out_free; } ptr = next + len; N++; ret = -1; while (1) { /* #5 */ next = ptr++; if (isspace(*next)) continue; switch (*next) { case ')': case '\0': break; case '&': case '|': if (next[1] == next[0]) { ptr++; break; } default: parse_error(pe, FILT_ERR_TOO_MANY_PREDS, next - str); goto out_free; } invert = *top & INVERT; if (*top & PROCESS_AND) { /* #7 */ update_preds(prog, N - 1, invert); *top &= ~PROCESS_AND; } if (*next == '&') { /* #8 */ *top |= PROCESS_AND; break; } if (*top & PROCESS_OR) { /* #9 */ update_preds(prog, N - 1, !invert); *top &= ~PROCESS_OR; } if (*next == '|') { /* #10 */ *top |= PROCESS_OR; break; } if (!*next) /* #11 */ goto out; if (top == op_stack) { ret = -1; /* Too few '(' */ parse_error(pe, FILT_ERR_TOO_MANY_CLOSE, ptr - str); goto out_free; } top--; /* #12 */ } } out: if (top != op_stack) { /* Too many '(' */ parse_error(pe, FILT_ERR_TOO_MANY_OPEN, ptr - str); goto out_free; } if (!N) { /* No program? */ ret = -EINVAL; parse_error(pe, FILT_ERR_NO_FILTER, ptr - str); goto out_free; } prog[N].pred = NULL; /* #13 */ prog[N].target = 1; /* TRUE */ prog[N+1].pred = NULL; prog[N+1].target = 0; /* FALSE */ prog[N-1].target = N; prog[N-1].when_to_branch = false; /* Second Pass */ for (i = N-1 ; i--; ) { int target = prog[i].target; if (prog[i].when_to_branch == prog[target].when_to_branch) prog[i].target = prog[target].target; } /* Third Pass */ for (i = 0; i < N; i++) { invert = inverts[i] ^ prog[i].when_to_branch; prog[i].when_to_branch = invert; /* Make sure the program always moves forward */ if (WARN_ON(prog[i].target <= i)) { ret = -EINVAL; goto out_free; } } return prog; out_free: kfree(op_stack); kfree(prog_stack); kfree(inverts); return ERR_PTR(ret); }
169,186
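The one-line fix above (the new if (!N) block) exists because the sentinel-writing code after the out: label indexes prog[N-1], which is out of bounds when no predicate was parsed. A detached sketch of the same guard, with the program-entry layout reduced to the minimum:

#include <stdio.h>
#include <stdlib.h>

struct entry { int target; int is_true; };

/* Build a predicate program with TRUE/FALSE sentinel slots at the end.
 * With n == 0 the prog[n - 1] write would land before the array, so an
 * empty filter is rejected before anything is written. */
static struct entry *build_prog(int n)
{
    if (n == 0) {
        fprintf(stderr, "empty filter\n");
        return NULL;
    }

    struct entry *prog = calloc((size_t)n + 2, sizeof(*prog));
    if (!prog)
        return NULL;

    prog[n].is_true     = 1;   /* TRUE sentinel  */
    prog[n + 1].is_true = 0;   /* FALSE sentinel */
    prog[n - 1].target  = n;   /* safe only because n >= 1 */
    return prog;
}

int main(void)
{
    struct entry *p = build_prog(0);
    printf("%s\n", p ? "built" : "rejected empty filter");
    free(p);
    return 0;
}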
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: pimv2_addr_print(netdissect_options *ndo, const u_char *bp, enum pimv2_addrtype at, int silent) { int af; int len, hdrlen; ND_TCHECK(bp[0]); if (pimv2_addr_len == 0) { ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; len = sizeof(struct in_addr); break; case 2: af = AF_INET6; len = sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (pimv2_addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } len = pimv2_addr_len; hdrlen = 0; } bp += hdrlen; switch (at) { case pimv2_unicast: ND_TCHECK2(bp[0], len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + len; case pimv2_group: case pimv2_source: ND_TCHECK2(bp[0], len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + len; default: return -1; } trunc: return -1; } Commit Message: CVE-2017-13030/PIM: Redo bounds checks and add length checks. Use ND_TCHECK macros to do bounds checking, and add length checks before the bounds checks. Add a bounds check that the review process found was missing. This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s), modified so the capture file won't be rejected as an invalid capture. Update one test output file to reflect the changes. CWE ID: CWE-125
pimv2_addr_print(netdissect_options *ndo, const u_char *bp, u_int len, enum pimv2_addrtype at, u_int addr_len, int silent) { int af; int hdrlen; if (addr_len == 0) { if (len < 2) goto trunc; ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; addr_len = (u_int)sizeof(struct in_addr); break; case 2: af = AF_INET6; addr_len = (u_int)sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } hdrlen = 0; } bp += hdrlen; len -= hdrlen; switch (at) { case pimv2_unicast: if (len < addr_len) goto trunc; ND_TCHECK2(bp[0], addr_len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + addr_len; case pimv2_group: case pimv2_source: if (len < addr_len + 2) goto trunc; ND_TCHECK2(bp[0], addr_len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + addr_len; default: return -1; } trunc: return -1; }
167,857
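The fixed pimv2_addr_print() threads the remaining packet length through the function and compares it against addr_len (or addr_len + 2) before every read, in addition to the capture-length ND_TCHECK checks. Below is the same "validate remaining length before consuming" pattern in a generic TLV-style reader; the 2-byte prefix and IPv4-sized address are just an example layout, not the PIM wire format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Parse one field: a 2-byte prefix followed by addr_len address bytes.
 * Returns bytes consumed, or -1 when the remaining length is too short,
 * so the memcpy below can never read past the buffer. */
static int parse_addr(const uint8_t *bp, size_t len, size_t addr_len,
                      uint8_t *out)
{
    if (len < 2 + addr_len)
        return -1;                     /* truncated input */

    memcpy(out, bp + 2, addr_len);
    return (int)(2 + addr_len);
}

int main(void)
{
    uint8_t pkt[] = { 0x00, 0x20, 192, 0, 2, 1 };
    uint8_t addr[4];

    if (parse_addr(pkt, sizeof(pkt), 4, addr) > 0)
        printf("%u.%u.%u.%u\n", addr[0], addr[1], addr[2], addr[3]);
    if (parse_addr(pkt, 3, 4, addr) < 0)
        printf("rejected truncated field\n");
    return 0;
}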
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int mp_pack(lua_State *L) { int nargs = lua_gettop(L); int i; mp_buf *buf; if (nargs == 0) return luaL_argerror(L, 0, "MessagePack pack needs input."); buf = mp_buf_new(L); for(i = 1; i <= nargs; i++) { /* Copy argument i to top of stack for _encode processing; * the encode function pops it from the stack when complete. */ lua_pushvalue(L, i); mp_encode_lua_type(L,buf,0); lua_pushlstring(L,(char*)buf->b,buf->len); /* Reuse the buffer for the next operation by * setting its free count to the total buffer size * and the current position to zero. */ buf->free += buf->len; buf->len = 0; } mp_buf_free(L, buf); /* Concatenate all nargs buffers together */ lua_concat(L, nargs); return 1; } Commit Message: Security: fix Lua cmsgpack library stack overflow. During an auditing effort, the Apple Vulnerability Research team discovered a critical Redis security issue affecting the Lua scripting part of Redis. -- Description of the problem Several years ago I merged a pull request including many small changes at the Lua MsgPack library (that originally I authored myself). The Pull Request entered Redis in commit 90b6337c1, in 2014. Unfortunately one of the changes included a variadic Lua function that lacked the check for the available Lua C stack. As a result, calling the "pack" MsgPack library function with a large number of arguments, results into pushing into the Lua C stack a number of new values proportional to the number of arguments the function was called with. The pushed values, moreover, are controlled by untrusted user input. This in turn causes stack smashing which we believe to be exploitable, while not very deterministic, but it is likely that an exploit could be created targeting specific versions of Redis executables. However at its minimum the issue results in a DoS, crashing the Redis server. -- Versions affected Versions greater or equal to Redis 2.8.18 are affected. -- Reproducing Reproduce with this (based on the original reproduction script by Apple security team): https://gist.github.com/antirez/82445fcbea6d9b19f97014cc6cc79f8a -- Verification of the fix The fix was tested in the following way: 1) I checked that the problem is no longer observable running the trigger. 2) The Lua code was analyzed to understand the stack semantics, and that actually enough stack is allocated in all the cases of mp_pack() calls. 3) The mp_pack() function was modified in order to show exactly what items in the stack were being set, to make sure that there is no silent overflow even after the fix. -- Credits Thank you to the Apple team and to the other persons that helped me checking the patch and coordinating this communication. CWE ID: CWE-119
int mp_pack(lua_State *L) { int nargs = lua_gettop(L); int i; mp_buf *buf; if (nargs == 0) return luaL_argerror(L, 0, "MessagePack pack needs input."); if (!lua_checkstack(L, nargs)) return luaL_argerror(L, 0, "Too many arguments for MessagePack pack."); buf = mp_buf_new(L); for(i = 1; i <= nargs; i++) { /* Copy argument i to top of stack for _encode processing; * the encode function pops it from the stack when complete. */ lua_pushvalue(L, i); mp_encode_lua_type(L,buf,0); lua_pushlstring(L,(char*)buf->b,buf->len); /* Reuse the buffer for the next operation by * setting its free count to the total buffer size * and the current position to zero. */ buf->free += buf->len; buf->len = 0; } mp_buf_free(L, buf); /* Concatenate all nargs buffers together */ lua_concat(L, nargs); return 1; }
170,167
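The entire fix above is the lua_checkstack() call: the Lua C API only guarantees LUA_MINSTACK free slots when a C function is entered, so a variadic function that pushes one value per argument must reserve that many slots first. A minimal, self-contained example of the same guard (not part of lua-cmsgpack); it could be registered with lua_register(L, "echo_all", l_echo_all).

#include <lua.h>
#include <lauxlib.h>

/* Echo every argument back to the caller.  Each lua_pushvalue() needs a
 * free stack slot, so grow the stack by nargs up front and raise a Lua
 * error (instead of smashing the stack) if that fails. */
static int l_echo_all(lua_State *L)
{
    int nargs = lua_gettop(L);
    int i;

    if (!lua_checkstack(L, nargs))
        return luaL_error(L, "too many arguments to echo_all");

    for (i = 1; i <= nargs; i++)
        lua_pushvalue(L, i);

    return nargs;
}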
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: DevToolsSession::DevToolsSession(DevToolsAgentHostImpl* agent_host, DevToolsAgentHostClient* client) : binding_(this), agent_host_(agent_host), client_(client), process_host_id_(ChildProcessHost::kInvalidUniqueID), host_(nullptr), dispatcher_(new protocol::UberDispatcher(this)), weak_factory_(this) { dispatcher_->setFallThroughForNotFound(true); } Commit Message: [DevTools] Do not allow chrome.debugger to attach to web ui pages If the page navigates to web ui, we force detach the debugger extension. [email protected] Bug: 798222 Change-Id: Idb46c2f59e839388397a8dfa6ce2e2a897698df3 Reviewed-on: https://chromium-review.googlesource.com/935961 Commit-Queue: Dmitry Gozman <[email protected]> Reviewed-by: Devlin <[email protected]> Reviewed-by: Pavel Feldman <[email protected]> Reviewed-by: Nasko Oskov <[email protected]> Cr-Commit-Position: refs/heads/master@{#540916} CWE ID: CWE-20
DevToolsSession::DevToolsSession(DevToolsAgentHostImpl* agent_host, DevToolsAgentHostClient* client, bool restricted) : binding_(this), agent_host_(agent_host), client_(client), restricted_(restricted), process_host_id_(ChildProcessHost::kInvalidUniqueID), host_(nullptr), dispatcher_(new protocol::UberDispatcher(this)), weak_factory_(this) { dispatcher_->setFallThroughForNotFound(true); }
173,247
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Image *ExtractPostscript(Image *image,const ImageInfo *image_info, MagickOffsetType PS_Offset,ssize_t PS_Size,ExceptionInfo *exception) { char postscript_file[MaxTextExtent]; const MagicInfo *magic_info; FILE *ps_file; ImageInfo *clone_info; Image *image2; unsigned char magick[2*MaxTextExtent]; if ((clone_info=CloneImageInfo(image_info)) == NULL) return(image); clone_info->blob=(void *) NULL; clone_info->length=0; /* Obtain temporary file */ (void) AcquireUniqueFilename(postscript_file); ps_file=fopen_utf8(postscript_file,"wb"); if (ps_file == (FILE *) NULL) goto FINISH; /* Copy postscript to temporary file */ (void) SeekBlob(image,PS_Offset,SEEK_SET); (void) ReadBlob(image, 2*MaxTextExtent, magick); (void) SeekBlob(image,PS_Offset,SEEK_SET); while(PS_Size-- > 0) { (void) fputc(ReadBlobByte(image),ps_file); } (void) fclose(ps_file); /* Detect file format - Check magic.mgk configuration file. */ magic_info=GetMagicInfo(magick,2*MaxTextExtent,exception); if(magic_info == (const MagicInfo *) NULL) goto FINISH_UNL; /* printf("Detected:%s \n",magic_info->name); */ if(exception->severity != UndefinedException) goto FINISH_UNL; if(magic_info->name == (char *) NULL) goto FINISH_UNL; (void) strncpy(clone_info->magick,magic_info->name,MaxTextExtent); /* Read nested image */ /*FormatString(clone_info->filename,"%s:%s",magic_info->name,postscript_file);*/ FormatLocaleString(clone_info->filename,MaxTextExtent,"%s",postscript_file); image2=ReadImage(clone_info,exception); if (!image2) goto FINISH_UNL; /* Replace current image with new image while copying base image attributes. */ (void) CopyMagickString(image2->filename,image->filename,MaxTextExtent); (void) CopyMagickString(image2->magick_filename,image->magick_filename,MaxTextExtent); (void) CopyMagickString(image2->magick,image->magick,MaxTextExtent); image2->depth=image->depth; DestroyBlob(image2); image2->blob=ReferenceBlob(image->blob); if ((image->rows == 0) || (image->columns == 0)) DeleteImageFromList(&image); AppendImageToList(&image,image2); FINISH_UNL: (void) RelinquishUniqueFileResource(postscript_file); FINISH: DestroyImageInfo(clone_info); return(image); } Commit Message: ... CWE ID: CWE-189
static Image *ExtractPostscript(Image *image,const ImageInfo *image_info, MagickOffsetType PS_Offset,ssize_t PS_Size,ExceptionInfo *exception) { char postscript_file[MaxTextExtent]; const MagicInfo *magic_info; FILE *ps_file; ImageInfo *clone_info; Image *image2; unsigned char magick[2*MaxTextExtent]; if ((clone_info=CloneImageInfo(image_info)) == NULL) return(image); clone_info->blob=(void *) NULL; clone_info->length=0; /* Obtain temporary file */ (void) AcquireUniqueFilename(postscript_file); ps_file=fopen_utf8(postscript_file,"wb"); if (ps_file == (FILE *) NULL) goto FINISH; /* Copy postscript to temporary file */ (void) SeekBlob(image,PS_Offset,SEEK_SET); (void) ReadBlob(image, 2*MaxTextExtent, magick); (void) SeekBlob(image,PS_Offset,SEEK_SET); while(PS_Size-- > 0) { (void) fputc(ReadBlobByte(image),ps_file); } (void) fclose(ps_file); /* Detect file format - Check magic.mgk configuration file. */ magic_info=GetMagicInfo(magick,2*MaxTextExtent,exception); if(magic_info == (const MagicInfo *) NULL) goto FINISH_UNL; /* printf("Detected:%s \n",magic_info->name); */ if(exception->severity != UndefinedException) goto FINISH_UNL; if(magic_info->name == (char *) NULL) goto FINISH_UNL; (void) strncpy(clone_info->magick,magic_info->name,MaxTextExtent-1); /* Read nested image */ /*FormatString(clone_info->filename,"%s:%s",magic_info->name,postscript_file);*/ FormatLocaleString(clone_info->filename,MaxTextExtent,"%s",postscript_file); image2=ReadImage(clone_info,exception); if (!image2) goto FINISH_UNL; /* Replace current image with new image while copying base image attributes. */ (void) CopyMagickString(image2->filename,image->filename,MaxTextExtent); (void) CopyMagickString(image2->magick_filename,image->magick_filename,MaxTextExtent); (void) CopyMagickString(image2->magick,image->magick,MaxTextExtent); image2->depth=image->depth; DestroyBlob(image2); image2->blob=ReferenceBlob(image->blob); if ((image->rows == 0) || (image->columns == 0)) DeleteImageFromList(&image); AppendImageToList(&image,image2); FINISH_UNL: (void) RelinquishUniqueFileResource(postscript_file); FINISH: DestroyImageInfo(clone_info); return(image); }
168,524
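The only functional change in the fixed ExtractPostscript() is copying at most MaxTextExtent-1 bytes into clone_info->magick. The reason is a standard strncpy() property: when the source is at least as long as the limit, no terminating NUL is written, so copying with the destination's full size can leave an unterminated string. A tiny demonstration follows; the explicit terminator in the second copy is an extra belt-and-braces step for the sketch, not part of the ImageMagick patch.

#include <stdio.h>
#include <string.h>

int main(void)
{
    char dst[8];

    /* Full-size copy: strncpy() copies 8 bytes of the longer source and
     * never writes a '\0'. */
    memset(dst, 'X', sizeof(dst));
    strncpy(dst, "ABCDEFGHIJ", sizeof(dst));
    printf("NUL present after full-size copy? %s\n",
           memchr(dst, '\0', sizeof(dst)) ? "yes" : "no");

    /* Leave room for the terminator and set it explicitly. */
    memset(dst, 'X', sizeof(dst));
    strncpy(dst, "ABCDEFGHIJ", sizeof(dst) - 1);
    dst[sizeof(dst) - 1] = '\0';
    printf("safe copy: %s\n", dst);
    return 0;
}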
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); int val, valbool; int retv = -ENOPROTOOPT; bool needs_rtnl = setsockopt_needs_rtnl(optname); if (!optval) val = 0; else { if (optlen >= sizeof(int)) { if (get_user(val, (int __user *) optval)) return -EFAULT; } else val = 0; } valbool = (val != 0); if (ip6_mroute_opt(optname)) return ip6_mroute_setsockopt(sk, optname, optval, optlen); if (needs_rtnl) rtnl_lock(); lock_sock(sk); switch (optname) { case IPV6_ADDRFORM: if (optlen < sizeof(int)) goto e_inval; if (val == PF_INET) { struct ipv6_txoptions *opt; struct sk_buff *pktopt; if (sk->sk_type == SOCK_RAW) break; if (sk->sk_protocol == IPPROTO_UDP || sk->sk_protocol == IPPROTO_UDPLITE) { struct udp_sock *up = udp_sk(sk); if (up->pending == AF_INET6) { retv = -EBUSY; break; } } else if (sk->sk_protocol != IPPROTO_TCP) break; if (sk->sk_state != TCP_ESTABLISHED) { retv = -ENOTCONN; break; } if (ipv6_only_sock(sk) || !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) { retv = -EADDRNOTAVAIL; break; } fl6_free_socklist(sk); ipv6_sock_mc_close(sk); /* * Sock is moving from IPv6 to IPv4 (sk_prot), so * remove it from the refcnt debug socks count in the * original family... */ sk_refcnt_debug_dec(sk); if (sk->sk_protocol == IPPROTO_TCP) { struct inet_connection_sock *icsk = inet_csk(sk); local_bh_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, &tcp_prot, 1); local_bh_enable(); sk->sk_prot = &tcp_prot; icsk->icsk_af_ops = &ipv4_specific; sk->sk_socket->ops = &inet_stream_ops; sk->sk_family = PF_INET; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); } else { struct proto *prot = &udp_prot; if (sk->sk_protocol == IPPROTO_UDPLITE) prot = &udplite_prot; local_bh_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, prot, 1); local_bh_enable(); sk->sk_prot = prot; sk->sk_socket->ops = &inet_dgram_ops; sk->sk_family = PF_INET; } opt = xchg(&np->opt, NULL); if (opt) sock_kfree_s(sk, opt, opt->tot_len); pktopt = xchg(&np->pktoptions, NULL); kfree_skb(pktopt); sk->sk_destruct = inet_sock_destruct; /* * ... and add it to the refcnt debug socks count * in the new family. 
-acme */ sk_refcnt_debug_inc(sk); module_put(THIS_MODULE); retv = 0; break; } goto e_inval; case IPV6_V6ONLY: if (optlen < sizeof(int) || inet_sk(sk)->inet_num) goto e_inval; sk->sk_ipv6only = valbool; retv = 0; break; case IPV6_RECVPKTINFO: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxinfo = valbool; retv = 0; break; case IPV6_2292PKTINFO: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxoinfo = valbool; retv = 0; break; case IPV6_RECVHOPLIMIT: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxhlim = valbool; retv = 0; break; case IPV6_2292HOPLIMIT: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxohlim = valbool; retv = 0; break; case IPV6_RECVRTHDR: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.srcrt = valbool; retv = 0; break; case IPV6_2292RTHDR: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.osrcrt = valbool; retv = 0; break; case IPV6_RECVHOPOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.hopopts = valbool; retv = 0; break; case IPV6_2292HOPOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.ohopopts = valbool; retv = 0; break; case IPV6_RECVDSTOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.dstopts = valbool; retv = 0; break; case IPV6_2292DSTOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.odstopts = valbool; retv = 0; break; case IPV6_TCLASS: if (optlen < sizeof(int)) goto e_inval; if (val < -1 || val > 0xff) goto e_inval; /* RFC 3542, 6.5: default traffic class of 0x0 */ if (val == -1) val = 0; np->tclass = val; retv = 0; break; case IPV6_RECVTCLASS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxtclass = valbool; retv = 0; break; case IPV6_FLOWINFO: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxflow = valbool; retv = 0; break; case IPV6_RECVPATHMTU: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxpmtu = valbool; retv = 0; break; case IPV6_TRANSPARENT: if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) && !ns_capable(net->user_ns, CAP_NET_RAW)) { retv = -EPERM; break; } if (optlen < sizeof(int)) goto e_inval; /* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */ inet_sk(sk)->transparent = valbool; retv = 0; break; case IPV6_RECVORIGDSTADDR: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxorigdstaddr = valbool; retv = 0; break; case IPV6_HOPOPTS: case IPV6_RTHDRDSTOPTS: case IPV6_RTHDR: case IPV6_DSTOPTS: { struct ipv6_txoptions *opt; /* remove any sticky options header with a zero option * length, per RFC3542. 
*/ if (optlen == 0) optval = NULL; else if (!optval) goto e_inval; else if (optlen < sizeof(struct ipv6_opt_hdr) || optlen & 0x7 || optlen > 8 * 255) goto e_inval; /* hop-by-hop / destination options are privileged option */ retv = -EPERM; if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) break; opt = ipv6_renew_options(sk, np->opt, optname, (struct ipv6_opt_hdr __user *)optval, optlen); if (IS_ERR(opt)) { retv = PTR_ERR(opt); break; } /* routing header option needs extra check */ retv = -EINVAL; if (optname == IPV6_RTHDR && opt && opt->srcrt) { struct ipv6_rt_hdr *rthdr = opt->srcrt; switch (rthdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: if (rthdr->hdrlen != 2 || rthdr->segments_left != 1) goto sticky_done; break; #endif default: goto sticky_done; } } retv = 0; opt = ipv6_update_options(sk, opt); sticky_done: if (opt) sock_kfree_s(sk, opt, opt->tot_len); break; } case IPV6_PKTINFO: { struct in6_pktinfo pkt; if (optlen == 0) goto e_inval; else if (optlen < sizeof(struct in6_pktinfo) || !optval) goto e_inval; if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) { retv = -EFAULT; break; } if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if) goto e_inval; np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex; np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr; retv = 0; break; } case IPV6_2292PKTOPTIONS: { struct ipv6_txoptions *opt = NULL; struct msghdr msg; struct flowi6 fl6; int junk; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; if (optlen == 0) goto update; /* 1K is probably excessive * 1K is surely not enough, 2K per standard header is 16K. */ retv = -EINVAL; if (optlen > 64*1024) break; opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL); retv = -ENOBUFS; if (!opt) break; memset(opt, 0, sizeof(*opt)); opt->tot_len = sizeof(*opt) + optlen; retv = -EFAULT; if (copy_from_user(opt+1, optval, optlen)) goto done; msg.msg_controllen = optlen; msg.msg_control = (void *)(opt+1); retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, &junk); if (retv) goto done; update: retv = 0; opt = ipv6_update_options(sk, opt); done: if (opt) sock_kfree_s(sk, opt, opt->tot_len); break; } case IPV6_UNICAST_HOPS: if (optlen < sizeof(int)) goto e_inval; if (val > 255 || val < -1) goto e_inval; np->hop_limit = val; retv = 0; break; case IPV6_MULTICAST_HOPS: if (sk->sk_type == SOCK_STREAM) break; if (optlen < sizeof(int)) goto e_inval; if (val > 255 || val < -1) goto e_inval; np->mcast_hops = (val == -1 ? 
IPV6_DEFAULT_MCASTHOPS : val); retv = 0; break; case IPV6_MULTICAST_LOOP: if (optlen < sizeof(int)) goto e_inval; if (val != valbool) goto e_inval; np->mc_loop = valbool; retv = 0; break; case IPV6_UNICAST_IF: { struct net_device *dev = NULL; int ifindex; if (optlen != sizeof(int)) goto e_inval; ifindex = (__force int)ntohl((__force __be32)val); if (ifindex == 0) { np->ucast_oif = 0; retv = 0; break; } dev = dev_get_by_index(net, ifindex); retv = -EADDRNOTAVAIL; if (!dev) break; dev_put(dev); retv = -EINVAL; if (sk->sk_bound_dev_if) break; np->ucast_oif = ifindex; retv = 0; break; } case IPV6_MULTICAST_IF: if (sk->sk_type == SOCK_STREAM) break; if (optlen < sizeof(int)) goto e_inval; if (val) { struct net_device *dev; if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) goto e_inval; dev = dev_get_by_index(net, val); if (!dev) { retv = -ENODEV; break; } dev_put(dev); } np->mcast_oif = val; retv = 0; break; case IPV6_ADD_MEMBERSHIP: case IPV6_DROP_MEMBERSHIP: { struct ipv6_mreq mreq; if (optlen < sizeof(struct ipv6_mreq)) goto e_inval; retv = -EPROTO; if (inet_sk(sk)->is_icsk) break; retv = -EFAULT; if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq))) break; if (optname == IPV6_ADD_MEMBERSHIP) retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr); else retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr); break; } case IPV6_JOIN_ANYCAST: case IPV6_LEAVE_ANYCAST: { struct ipv6_mreq mreq; if (optlen < sizeof(struct ipv6_mreq)) goto e_inval; retv = -EFAULT; if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq))) break; if (optname == IPV6_JOIN_ANYCAST) retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr); else retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr); break; } case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: { struct group_req greq; struct sockaddr_in6 *psin6; if (optlen < sizeof(struct group_req)) goto e_inval; retv = -EFAULT; if (copy_from_user(&greq, optval, sizeof(struct group_req))) break; if (greq.gr_group.ss_family != AF_INET6) { retv = -EADDRNOTAVAIL; break; } psin6 = (struct sockaddr_in6 *)&greq.gr_group; if (optname == MCAST_JOIN_GROUP) retv = ipv6_sock_mc_join(sk, greq.gr_interface, &psin6->sin6_addr); else retv = ipv6_sock_mc_drop(sk, greq.gr_interface, &psin6->sin6_addr); break; } case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: { struct group_source_req greqs; int omode, add; if (optlen < sizeof(struct group_source_req)) goto e_inval; if (copy_from_user(&greqs, optval, sizeof(greqs))) { retv = -EFAULT; break; } if (greqs.gsr_group.ss_family != AF_INET6 || greqs.gsr_source.ss_family != AF_INET6) { retv = -EADDRNOTAVAIL; break; } if (optname == MCAST_BLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 1; } else if (optname == MCAST_UNBLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 0; } else if (optname == MCAST_JOIN_SOURCE_GROUP) { struct sockaddr_in6 *psin6; psin6 = (struct sockaddr_in6 *)&greqs.gsr_group; retv = ipv6_sock_mc_join(sk, greqs.gsr_interface, &psin6->sin6_addr); /* prior join w/ different source is ok */ if (retv && retv != -EADDRINUSE) break; omode = MCAST_INCLUDE; add = 1; } else /* MCAST_LEAVE_SOURCE_GROUP */ { omode = MCAST_INCLUDE; add = 0; } retv = ip6_mc_source(add, omode, sk, &greqs); break; } case MCAST_MSFILTER: { struct group_filter *gsf; if (optlen < GROUP_FILTER_SIZE(0)) goto e_inval; if (optlen > sysctl_optmem_max) { retv = -ENOBUFS; break; } gsf = kmalloc(optlen, GFP_KERNEL); if (!gsf) { retv = 
-ENOBUFS; break; } retv = -EFAULT; if (copy_from_user(gsf, optval, optlen)) { kfree(gsf); break; } /* numsrc >= (4G-140)/128 overflow in 32 bits */ if (gsf->gf_numsrc >= 0x1ffffffU || gsf->gf_numsrc > sysctl_mld_max_msf) { kfree(gsf); retv = -ENOBUFS; break; } if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) { kfree(gsf); retv = -EINVAL; break; } retv = ip6_mc_msfilter(sk, gsf); kfree(gsf); break; } case IPV6_ROUTER_ALERT: if (optlen < sizeof(int)) goto e_inval; retv = ip6_ra_control(sk, val); break; case IPV6_MTU_DISCOVER: if (optlen < sizeof(int)) goto e_inval; if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_OMIT) goto e_inval; np->pmtudisc = val; retv = 0; break; case IPV6_MTU: if (optlen < sizeof(int)) goto e_inval; if (val && val < IPV6_MIN_MTU) goto e_inval; np->frag_size = val; retv = 0; break; case IPV6_RECVERR: if (optlen < sizeof(int)) goto e_inval; np->recverr = valbool; if (!val) skb_queue_purge(&sk->sk_error_queue); retv = 0; break; case IPV6_FLOWINFO_SEND: if (optlen < sizeof(int)) goto e_inval; np->sndflow = valbool; retv = 0; break; case IPV6_FLOWLABEL_MGR: retv = ipv6_flowlabel_opt(sk, optval, optlen); break; case IPV6_IPSEC_POLICY: case IPV6_XFRM_POLICY: retv = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; retv = xfrm_user_policy(sk, optname, optval, optlen); break; case IPV6_ADDR_PREFERENCES: { unsigned int pref = 0; unsigned int prefmask = ~0; if (optlen < sizeof(int)) goto e_inval; retv = -EINVAL; /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */ switch (val & (IPV6_PREFER_SRC_PUBLIC| IPV6_PREFER_SRC_TMP| IPV6_PREFER_SRC_PUBTMP_DEFAULT)) { case IPV6_PREFER_SRC_PUBLIC: pref |= IPV6_PREFER_SRC_PUBLIC; break; case IPV6_PREFER_SRC_TMP: pref |= IPV6_PREFER_SRC_TMP; break; case IPV6_PREFER_SRC_PUBTMP_DEFAULT: break; case 0: goto pref_skip_pubtmp; default: goto e_inval; } prefmask &= ~(IPV6_PREFER_SRC_PUBLIC| IPV6_PREFER_SRC_TMP); pref_skip_pubtmp: /* check HOME/COA conflicts */ switch (val & (IPV6_PREFER_SRC_HOME|IPV6_PREFER_SRC_COA)) { case IPV6_PREFER_SRC_HOME: break; case IPV6_PREFER_SRC_COA: pref |= IPV6_PREFER_SRC_COA; case 0: goto pref_skip_coa; default: goto e_inval; } prefmask &= ~IPV6_PREFER_SRC_COA; pref_skip_coa: /* check CGA/NONCGA conflicts */ switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) { case IPV6_PREFER_SRC_CGA: case IPV6_PREFER_SRC_NONCGA: case 0: break; default: goto e_inval; } np->srcprefs = (np->srcprefs & prefmask) | pref; retv = 0; break; } case IPV6_MINHOPCOUNT: if (optlen < sizeof(int)) goto e_inval; if (val < 0 || val > 255) goto e_inval; np->min_hopcount = val; retv = 0; break; case IPV6_DONTFRAG: np->dontfrag = valbool; retv = 0; break; case IPV6_AUTOFLOWLABEL: np->autoflowlabel = valbool; retv = 0; break; } release_sock(sk); if (needs_rtnl) rtnl_unlock(); return retv; e_inval: release_sock(sk); if (needs_rtnl) rtnl_unlock(); return -EINVAL; } Commit Message: ipv6: add complete rcu protection around np->opt This patch addresses multiple problems : UDP/RAW sendmsg() need to get a stable struct ipv6_txoptions while socket is not locked : Other threads can change np->opt concurrently. Dmitry posted a syzkaller (http://github.com/google/syzkaller) program desmonstrating use-after-free. 
Starting with TCP/DCCP lockless listeners, tcp_v6_syn_recv_sock() and dccp_v6_request_recv_sock() also need to use RCU protection to dereference np->opt once (before calling ipv6_dup_options()) This patch adds full RCU protection to np->opt Reported-by: Dmitry Vyukov <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Acked-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-416
static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); int val, valbool; int retv = -ENOPROTOOPT; bool needs_rtnl = setsockopt_needs_rtnl(optname); if (!optval) val = 0; else { if (optlen >= sizeof(int)) { if (get_user(val, (int __user *) optval)) return -EFAULT; } else val = 0; } valbool = (val != 0); if (ip6_mroute_opt(optname)) return ip6_mroute_setsockopt(sk, optname, optval, optlen); if (needs_rtnl) rtnl_lock(); lock_sock(sk); switch (optname) { case IPV6_ADDRFORM: if (optlen < sizeof(int)) goto e_inval; if (val == PF_INET) { struct ipv6_txoptions *opt; struct sk_buff *pktopt; if (sk->sk_type == SOCK_RAW) break; if (sk->sk_protocol == IPPROTO_UDP || sk->sk_protocol == IPPROTO_UDPLITE) { struct udp_sock *up = udp_sk(sk); if (up->pending == AF_INET6) { retv = -EBUSY; break; } } else if (sk->sk_protocol != IPPROTO_TCP) break; if (sk->sk_state != TCP_ESTABLISHED) { retv = -ENOTCONN; break; } if (ipv6_only_sock(sk) || !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) { retv = -EADDRNOTAVAIL; break; } fl6_free_socklist(sk); ipv6_sock_mc_close(sk); /* * Sock is moving from IPv6 to IPv4 (sk_prot), so * remove it from the refcnt debug socks count in the * original family... */ sk_refcnt_debug_dec(sk); if (sk->sk_protocol == IPPROTO_TCP) { struct inet_connection_sock *icsk = inet_csk(sk); local_bh_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, &tcp_prot, 1); local_bh_enable(); sk->sk_prot = &tcp_prot; icsk->icsk_af_ops = &ipv4_specific; sk->sk_socket->ops = &inet_stream_ops; sk->sk_family = PF_INET; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); } else { struct proto *prot = &udp_prot; if (sk->sk_protocol == IPPROTO_UDPLITE) prot = &udplite_prot; local_bh_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, prot, 1); local_bh_enable(); sk->sk_prot = prot; sk->sk_socket->ops = &inet_dgram_ops; sk->sk_family = PF_INET; } opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL); if (opt) { atomic_sub(opt->tot_len, &sk->sk_omem_alloc); txopt_put(opt); } pktopt = xchg(&np->pktoptions, NULL); kfree_skb(pktopt); sk->sk_destruct = inet_sock_destruct; /* * ... and add it to the refcnt debug socks count * in the new family. 
-acme */ sk_refcnt_debug_inc(sk); module_put(THIS_MODULE); retv = 0; break; } goto e_inval; case IPV6_V6ONLY: if (optlen < sizeof(int) || inet_sk(sk)->inet_num) goto e_inval; sk->sk_ipv6only = valbool; retv = 0; break; case IPV6_RECVPKTINFO: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxinfo = valbool; retv = 0; break; case IPV6_2292PKTINFO: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxoinfo = valbool; retv = 0; break; case IPV6_RECVHOPLIMIT: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxhlim = valbool; retv = 0; break; case IPV6_2292HOPLIMIT: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxohlim = valbool; retv = 0; break; case IPV6_RECVRTHDR: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.srcrt = valbool; retv = 0; break; case IPV6_2292RTHDR: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.osrcrt = valbool; retv = 0; break; case IPV6_RECVHOPOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.hopopts = valbool; retv = 0; break; case IPV6_2292HOPOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.ohopopts = valbool; retv = 0; break; case IPV6_RECVDSTOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.dstopts = valbool; retv = 0; break; case IPV6_2292DSTOPTS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.odstopts = valbool; retv = 0; break; case IPV6_TCLASS: if (optlen < sizeof(int)) goto e_inval; if (val < -1 || val > 0xff) goto e_inval; /* RFC 3542, 6.5: default traffic class of 0x0 */ if (val == -1) val = 0; np->tclass = val; retv = 0; break; case IPV6_RECVTCLASS: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxtclass = valbool; retv = 0; break; case IPV6_FLOWINFO: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxflow = valbool; retv = 0; break; case IPV6_RECVPATHMTU: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxpmtu = valbool; retv = 0; break; case IPV6_TRANSPARENT: if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) && !ns_capable(net->user_ns, CAP_NET_RAW)) { retv = -EPERM; break; } if (optlen < sizeof(int)) goto e_inval; /* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */ inet_sk(sk)->transparent = valbool; retv = 0; break; case IPV6_RECVORIGDSTADDR: if (optlen < sizeof(int)) goto e_inval; np->rxopt.bits.rxorigdstaddr = valbool; retv = 0; break; case IPV6_HOPOPTS: case IPV6_RTHDRDSTOPTS: case IPV6_RTHDR: case IPV6_DSTOPTS: { struct ipv6_txoptions *opt; /* remove any sticky options header with a zero option * length, per RFC3542. 
*/ if (optlen == 0) optval = NULL; else if (!optval) goto e_inval; else if (optlen < sizeof(struct ipv6_opt_hdr) || optlen & 0x7 || optlen > 8 * 255) goto e_inval; /* hop-by-hop / destination options are privileged option */ retv = -EPERM; if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) break; opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); opt = ipv6_renew_options(sk, opt, optname, (struct ipv6_opt_hdr __user *)optval, optlen); if (IS_ERR(opt)) { retv = PTR_ERR(opt); break; } /* routing header option needs extra check */ retv = -EINVAL; if (optname == IPV6_RTHDR && opt && opt->srcrt) { struct ipv6_rt_hdr *rthdr = opt->srcrt; switch (rthdr->type) { #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPV6_SRCRT_TYPE_2: if (rthdr->hdrlen != 2 || rthdr->segments_left != 1) goto sticky_done; break; #endif default: goto sticky_done; } } retv = 0; opt = ipv6_update_options(sk, opt); sticky_done: if (opt) { atomic_sub(opt->tot_len, &sk->sk_omem_alloc); txopt_put(opt); } break; } case IPV6_PKTINFO: { struct in6_pktinfo pkt; if (optlen == 0) goto e_inval; else if (optlen < sizeof(struct in6_pktinfo) || !optval) goto e_inval; if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) { retv = -EFAULT; break; } if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if) goto e_inval; np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex; np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr; retv = 0; break; } case IPV6_2292PKTOPTIONS: { struct ipv6_txoptions *opt = NULL; struct msghdr msg; struct flowi6 fl6; int junk; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; if (optlen == 0) goto update; /* 1K is probably excessive * 1K is surely not enough, 2K per standard header is 16K. */ retv = -EINVAL; if (optlen > 64*1024) break; opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL); retv = -ENOBUFS; if (!opt) break; memset(opt, 0, sizeof(*opt)); atomic_set(&opt->refcnt, 1); opt->tot_len = sizeof(*opt) + optlen; retv = -EFAULT; if (copy_from_user(opt+1, optval, optlen)) goto done; msg.msg_controllen = optlen; msg.msg_control = (void *)(opt+1); retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, &junk); if (retv) goto done; update: retv = 0; opt = ipv6_update_options(sk, opt); done: if (opt) { atomic_sub(opt->tot_len, &sk->sk_omem_alloc); txopt_put(opt); } break; } case IPV6_UNICAST_HOPS: if (optlen < sizeof(int)) goto e_inval; if (val > 255 || val < -1) goto e_inval; np->hop_limit = val; retv = 0; break; case IPV6_MULTICAST_HOPS: if (sk->sk_type == SOCK_STREAM) break; if (optlen < sizeof(int)) goto e_inval; if (val > 255 || val < -1) goto e_inval; np->mcast_hops = (val == -1 ? 
IPV6_DEFAULT_MCASTHOPS : val); retv = 0; break; case IPV6_MULTICAST_LOOP: if (optlen < sizeof(int)) goto e_inval; if (val != valbool) goto e_inval; np->mc_loop = valbool; retv = 0; break; case IPV6_UNICAST_IF: { struct net_device *dev = NULL; int ifindex; if (optlen != sizeof(int)) goto e_inval; ifindex = (__force int)ntohl((__force __be32)val); if (ifindex == 0) { np->ucast_oif = 0; retv = 0; break; } dev = dev_get_by_index(net, ifindex); retv = -EADDRNOTAVAIL; if (!dev) break; dev_put(dev); retv = -EINVAL; if (sk->sk_bound_dev_if) break; np->ucast_oif = ifindex; retv = 0; break; } case IPV6_MULTICAST_IF: if (sk->sk_type == SOCK_STREAM) break; if (optlen < sizeof(int)) goto e_inval; if (val) { struct net_device *dev; if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) goto e_inval; dev = dev_get_by_index(net, val); if (!dev) { retv = -ENODEV; break; } dev_put(dev); } np->mcast_oif = val; retv = 0; break; case IPV6_ADD_MEMBERSHIP: case IPV6_DROP_MEMBERSHIP: { struct ipv6_mreq mreq; if (optlen < sizeof(struct ipv6_mreq)) goto e_inval; retv = -EPROTO; if (inet_sk(sk)->is_icsk) break; retv = -EFAULT; if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq))) break; if (optname == IPV6_ADD_MEMBERSHIP) retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr); else retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr); break; } case IPV6_JOIN_ANYCAST: case IPV6_LEAVE_ANYCAST: { struct ipv6_mreq mreq; if (optlen < sizeof(struct ipv6_mreq)) goto e_inval; retv = -EFAULT; if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq))) break; if (optname == IPV6_JOIN_ANYCAST) retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr); else retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr); break; } case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: { struct group_req greq; struct sockaddr_in6 *psin6; if (optlen < sizeof(struct group_req)) goto e_inval; retv = -EFAULT; if (copy_from_user(&greq, optval, sizeof(struct group_req))) break; if (greq.gr_group.ss_family != AF_INET6) { retv = -EADDRNOTAVAIL; break; } psin6 = (struct sockaddr_in6 *)&greq.gr_group; if (optname == MCAST_JOIN_GROUP) retv = ipv6_sock_mc_join(sk, greq.gr_interface, &psin6->sin6_addr); else retv = ipv6_sock_mc_drop(sk, greq.gr_interface, &psin6->sin6_addr); break; } case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: { struct group_source_req greqs; int omode, add; if (optlen < sizeof(struct group_source_req)) goto e_inval; if (copy_from_user(&greqs, optval, sizeof(greqs))) { retv = -EFAULT; break; } if (greqs.gsr_group.ss_family != AF_INET6 || greqs.gsr_source.ss_family != AF_INET6) { retv = -EADDRNOTAVAIL; break; } if (optname == MCAST_BLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 1; } else if (optname == MCAST_UNBLOCK_SOURCE) { omode = MCAST_EXCLUDE; add = 0; } else if (optname == MCAST_JOIN_SOURCE_GROUP) { struct sockaddr_in6 *psin6; psin6 = (struct sockaddr_in6 *)&greqs.gsr_group; retv = ipv6_sock_mc_join(sk, greqs.gsr_interface, &psin6->sin6_addr); /* prior join w/ different source is ok */ if (retv && retv != -EADDRINUSE) break; omode = MCAST_INCLUDE; add = 1; } else /* MCAST_LEAVE_SOURCE_GROUP */ { omode = MCAST_INCLUDE; add = 0; } retv = ip6_mc_source(add, omode, sk, &greqs); break; } case MCAST_MSFILTER: { struct group_filter *gsf; if (optlen < GROUP_FILTER_SIZE(0)) goto e_inval; if (optlen > sysctl_optmem_max) { retv = -ENOBUFS; break; } gsf = kmalloc(optlen, GFP_KERNEL); if (!gsf) { retv = 
-ENOBUFS; break; } retv = -EFAULT; if (copy_from_user(gsf, optval, optlen)) { kfree(gsf); break; } /* numsrc >= (4G-140)/128 overflow in 32 bits */ if (gsf->gf_numsrc >= 0x1ffffffU || gsf->gf_numsrc > sysctl_mld_max_msf) { kfree(gsf); retv = -ENOBUFS; break; } if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) { kfree(gsf); retv = -EINVAL; break; } retv = ip6_mc_msfilter(sk, gsf); kfree(gsf); break; } case IPV6_ROUTER_ALERT: if (optlen < sizeof(int)) goto e_inval; retv = ip6_ra_control(sk, val); break; case IPV6_MTU_DISCOVER: if (optlen < sizeof(int)) goto e_inval; if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_OMIT) goto e_inval; np->pmtudisc = val; retv = 0; break; case IPV6_MTU: if (optlen < sizeof(int)) goto e_inval; if (val && val < IPV6_MIN_MTU) goto e_inval; np->frag_size = val; retv = 0; break; case IPV6_RECVERR: if (optlen < sizeof(int)) goto e_inval; np->recverr = valbool; if (!val) skb_queue_purge(&sk->sk_error_queue); retv = 0; break; case IPV6_FLOWINFO_SEND: if (optlen < sizeof(int)) goto e_inval; np->sndflow = valbool; retv = 0; break; case IPV6_FLOWLABEL_MGR: retv = ipv6_flowlabel_opt(sk, optval, optlen); break; case IPV6_IPSEC_POLICY: case IPV6_XFRM_POLICY: retv = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; retv = xfrm_user_policy(sk, optname, optval, optlen); break; case IPV6_ADDR_PREFERENCES: { unsigned int pref = 0; unsigned int prefmask = ~0; if (optlen < sizeof(int)) goto e_inval; retv = -EINVAL; /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */ switch (val & (IPV6_PREFER_SRC_PUBLIC| IPV6_PREFER_SRC_TMP| IPV6_PREFER_SRC_PUBTMP_DEFAULT)) { case IPV6_PREFER_SRC_PUBLIC: pref |= IPV6_PREFER_SRC_PUBLIC; break; case IPV6_PREFER_SRC_TMP: pref |= IPV6_PREFER_SRC_TMP; break; case IPV6_PREFER_SRC_PUBTMP_DEFAULT: break; case 0: goto pref_skip_pubtmp; default: goto e_inval; } prefmask &= ~(IPV6_PREFER_SRC_PUBLIC| IPV6_PREFER_SRC_TMP); pref_skip_pubtmp: /* check HOME/COA conflicts */ switch (val & (IPV6_PREFER_SRC_HOME|IPV6_PREFER_SRC_COA)) { case IPV6_PREFER_SRC_HOME: break; case IPV6_PREFER_SRC_COA: pref |= IPV6_PREFER_SRC_COA; case 0: goto pref_skip_coa; default: goto e_inval; } prefmask &= ~IPV6_PREFER_SRC_COA; pref_skip_coa: /* check CGA/NONCGA conflicts */ switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) { case IPV6_PREFER_SRC_CGA: case IPV6_PREFER_SRC_NONCGA: case 0: break; default: goto e_inval; } np->srcprefs = (np->srcprefs & prefmask) | pref; retv = 0; break; } case IPV6_MINHOPCOUNT: if (optlen < sizeof(int)) goto e_inval; if (val < 0 || val > 255) goto e_inval; np->min_hopcount = val; retv = 0; break; case IPV6_DONTFRAG: np->dontfrag = valbool; retv = 0; break; case IPV6_AUTOFLOWLABEL: np->autoflowlabel = valbool; retv = 0; break; } release_sock(sk); if (needs_rtnl) rtnl_unlock(); return retv; e_inval: release_sock(sk); if (needs_rtnl) rtnl_unlock(); return -EINVAL; }
167,336
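The np->opt change above works by treating the options block as a reference-counted object: readers pin it before use and writers swap in a replacement atomically, so a concurrent setsockopt can never free memory another thread still holds. Below is a minimal userspace sketch of that refcount-and-swap half of the scheme (invented names, C11 atomics); the kernel additionally wraps the pointer load in an RCU read-side critical section, which this single-threaded demo does not model.

/* Illustrative only: a refcounted "options" blob that readers pin and a
 * writer swaps atomically, mirroring the reasoning of the np->opt fix. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct opts {
    atomic_int refcnt;
    char data[32];
};

static struct opts *opts_get(struct opts *_Atomic *slot)
{
    /* Reader side: pin the current object before using it. */
    struct opts *o = atomic_load(slot);
    if (o)
        atomic_fetch_add(&o->refcnt, 1);
    return o;
}

static void opts_put(struct opts *o)
{
    /* Free only when the last user drops its reference. */
    if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
        free(o);
}

static void opts_replace(struct opts *_Atomic *slot, struct opts *next)
{
    /* Writer side: publish the new block, then put the old one.
     * Readers that already pinned the old block keep it alive. */
    struct opts *old = atomic_exchange(slot, next);
    opts_put(old);
}

int main(void)
{
    struct opts *_Atomic current = NULL;

    struct opts *a = calloc(1, sizeof(*a));
    atomic_init(&a->refcnt, 1);                 /* reference owned by 'current' */
    strcpy(a->data, "hop-by-hop opts v1");
    opts_replace(&current, a);

    struct opts *pinned = opts_get(&current);   /* reader takes a reference */

    struct opts *b = calloc(1, sizeof(*b));
    atomic_init(&b->refcnt, 1);
    strcpy(b->data, "hop-by-hop opts v2");
    opts_replace(&current, b);                  /* writer swaps; 'a' stays alive */

    printf("reader still sees: %s\n", pinned->data);
    opts_put(pinned);                           /* now 'a' is freed */

    opts_replace(&current, NULL);               /* drop 'b' as well */
    return 0;
}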
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void LocalFileSystem::fileSystemNotAvailable( PassRefPtrWillBeRawPtr<ExecutionContext> context, PassRefPtr<CallbackWrapper> callbacks) { context->postTask(createCrossThreadTask(&reportFailure, callbacks->release(), FileError::ABORT_ERR)); } Commit Message: Oilpan: Ship Oilpan for SyncCallbackHelper, CreateFileResult and CallbackWrapper in filesystem/ These are leftovers when we shipped Oilpan for filesystem/ once. BUG=340522 Review URL: https://codereview.chromium.org/501263003 git-svn-id: svn://svn.chromium.org/blink/trunk@180909 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
void LocalFileSystem::fileSystemNotAvailable( PassRefPtrWillBeRawPtr<ExecutionContext> context, CallbackWrapper* callbacks) { context->postTask(createCrossThreadTask(&reportFailure, callbacks->release(), FileError::ABORT_ERR)); }
171,428
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int handle(int s, unsigned char* data, int len, struct sockaddr_in *s_in) { char buf[2048]; unsigned short *cmd = (unsigned short *)buf; int plen; struct in_addr *addr = &s_in->sin_addr; unsigned short *pid = (unsigned short*) data; /* inet check */ if (len == S_HELLO_LEN && memcmp(data, "sorbo", 5) == 0) { unsigned short *id = (unsigned short*) (data+5); int x = 2+4+2; *cmd = htons(S_CMD_INET_CHECK); memcpy(cmd+1, addr, 4); memcpy(cmd+1+2, id, 2); printf("Inet check by %s %d\n", inet_ntoa(*addr), ntohs(*id)); if (send(s, buf, x, 0) != x) return 1; return 0; } *cmd++ = htons(S_CMD_PACKET); *cmd++ = *pid; plen = len - 2; last_id = ntohs(*pid); if (last_id > 20000) wrap = 1; if (wrap && last_id < 100) { wrap = 0; memset(ids, 0, sizeof(ids)); } printf("Got packet %d %d", last_id, plen); if (is_dup(last_id)) { printf(" (DUP)\n"); return 0; } printf("\n"); *cmd++ = htons(plen); memcpy(cmd, data+2, plen); plen += 2 + 2 + 2; assert(plen <= (int) sizeof(buf)); if (send(s, buf, plen, 0) != plen) return 1; return 0; } Commit Message: Buddy-ng: Fixed segmentation fault (Closes #15 on GitHub). git-svn-id: http://svn.aircrack-ng.org/trunk@2418 28c6078b-6c39-48e3-add9-af49d547ecab CWE ID: CWE-20
int handle(int s, unsigned char* data, int len, struct sockaddr_in *s_in) { char buf[2048]; unsigned short *cmd = (unsigned short *)buf; int plen; struct in_addr *addr = &s_in->sin_addr; unsigned short *pid = (unsigned short*) data; /* inet check */ if (len == S_HELLO_LEN && memcmp(data, "sorbo", 5) == 0) { unsigned short *id = (unsigned short*) (data+5); int x = 2+4+2; *cmd = htons(S_CMD_INET_CHECK); memcpy(cmd+1, addr, 4); memcpy(cmd+1+2, id, 2); printf("Inet check by %s %d\n", inet_ntoa(*addr), ntohs(*id)); if (send(s, buf, x, 0) != x) return 1; return 0; } *cmd++ = htons(S_CMD_PACKET); *cmd++ = *pid; plen = len - 2; if (plen < 0) return 0; last_id = ntohs(*pid); if (last_id > 20000) wrap = 1; if (wrap && last_id < 100) { wrap = 0; memset(ids, 0, sizeof(ids)); } printf("Got packet %d %d", last_id, plen); if (is_dup(last_id)) { printf(" (DUP)\n"); return 0; } printf("\n"); *cmd++ = htons(plen); memcpy(cmd, data+2, plen); plen += 2 + 2 + 2; assert(plen <= (int) sizeof(buf)); if (send(s, buf, plen, 0) != plen) return 1; return 0; }
168,913
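The added plen < 0 check above matters because len - 2 is a signed int: for a 1-byte packet it becomes -1, and if that value ever reaches memcpy it is converted to an enormous size_t. A small illustrative sketch of the hazard and the guard, with made-up buffer sizes:

/* Illustration of why the plen < 0 check matters: a negative int length
 * converted to size_t becomes enormous. Demo values are invented. */
#include <stdio.h>
#include <string.h>

static int copy_payload(char *dst, size_t dstcap,
                        const unsigned char *data, int len)
{
    int plen = len - 2;              /* strip a 2-byte header, as in handle() */
    if (plen < 0)                    /* reject short packets up front */
        return -1;
    if ((size_t)plen > dstcap)       /* and ones larger than the buffer */
        return -1;
    memcpy(dst, data + 2, (size_t)plen);
    return plen;
}

int main(void)
{
    unsigned char pkt[4] = { 0xab, 0xcd, 'h', 'i' };
    char buf[16];

    printf("len=4 -> copied %d bytes\n", copy_payload(buf, sizeof(buf), pkt, 4));
    printf("len=1 -> copied %d bytes\n", copy_payload(buf, sizeof(buf), pkt, 1));
    printf("(int)-1 as size_t = %zu\n", (size_t)-1);  /* the would-be memcpy size */
    return 0;
}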
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int module_load( YR_SCAN_CONTEXT* context, YR_OBJECT* module_object, void* module_data, size_t module_data_size) { set_integer(1, module_object, "constants.one"); set_integer(2, module_object, "constants.two"); set_string("foo", module_object, "constants.foo"); set_string("", module_object, "constants.empty"); set_integer(1, module_object, "struct_array[1].i"); set_integer(0, module_object, "integer_array[%i]", 0); set_integer(1, module_object, "integer_array[%i]", 1); set_integer(2, module_object, "integer_array[%i]", 2); set_string("foo", module_object, "string_array[%i]", 0); set_string("bar", module_object, "string_array[%i]", 1); set_string("baz", module_object, "string_array[%i]", 2); set_sized_string("foo\0bar", 7, module_object, "string_array[%i]", 3); set_string("foo", module_object, "string_dict[%s]", "foo"); set_string("bar", module_object, "string_dict[\"bar\"]"); set_string("foo", module_object, "struct_dict[%s].s", "foo"); set_integer(1, module_object, "struct_dict[%s].i", "foo"); return ERROR_SUCCESS; } Commit Message: Fix heap overflow (reported by Jurriaan Bremer) When setting a new array item with yr_object_array_set_item() the array size is doubled if the index for the new item is larger than the already allocated ones. No further checks were made to ensure that the index fits into the array after doubling its capacity. If the array capacity was for example 64, and a new object is assigned to an index larger than 128 the overflow occurs. As yr_object_array_set_item() is usually invoked with indexes that increase monotonically by one, this bug never triggered before. But the new "dotnet" module has the potential to allow the exploitation of this bug by scanning a specially crafted .NET binary. CWE ID: CWE-119
int module_load( YR_SCAN_CONTEXT* context, YR_OBJECT* module_object, void* module_data, size_t module_data_size) { set_integer(1, module_object, "constants.one"); set_integer(2, module_object, "constants.two"); set_string("foo", module_object, "constants.foo"); set_string("", module_object, "constants.empty"); set_integer(1, module_object, "struct_array[1].i"); set_integer(0, module_object, "integer_array[%i]", 0); set_integer(1, module_object, "integer_array[%i]", 1); set_integer(2, module_object, "integer_array[%i]", 2); set_integer(256, module_object, "integer_array[%i]", 256); set_string("foo", module_object, "string_array[%i]", 0); set_string("bar", module_object, "string_array[%i]", 1); set_string("baz", module_object, "string_array[%i]", 2); set_sized_string("foo\0bar", 7, module_object, "string_array[%i]", 3); set_string("foo", module_object, "string_dict[%s]", "foo"); set_string("bar", module_object, "string_dict[\"bar\"]"); set_string("foo", module_object, "struct_dict[%s].s", "foo"); set_integer(1, module_object, "struct_dict[%s].i", "foo"); return ERROR_SUCCESS; }
168,044
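The new integer_array[256] line above is a regression test for the bug described in the commit message: the array-growing code doubled its capacity once and never re-checked that the requested index actually fit. A hedged sketch of the grow-until-it-fits pattern the fix implies (invented API, not YARA's real yr_object_array_set_item):

/* Minimal dynamic array that keeps doubling until the index fits,
 * then bounds-checks before writing. Illustration only. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct int_array {
    size_t capacity;
    int *items;
};

static int array_set(struct int_array *a, size_t index, int value)
{
    if (index >= a->capacity) {
        size_t new_cap = a->capacity ? a->capacity : 8;
        while (new_cap <= index) {          /* keep doubling; one step is not enough */
            if (new_cap > SIZE_MAX / 2)
                return -1;                  /* overflow guard */
            new_cap *= 2;
        }
        int *tmp = realloc(a->items, new_cap * sizeof(*tmp));
        if (!tmp)
            return -1;
        memset(tmp + a->capacity, 0, (new_cap - a->capacity) * sizeof(*tmp));
        a->items = tmp;
        a->capacity = new_cap;
    }
    a->items[index] = value;                /* index < capacity is now guaranteed */
    return 0;
}

int main(void)
{
    struct int_array a = { 0, NULL };
    array_set(&a, 1, 1);
    array_set(&a, 256, 256);                /* the case the old code overflowed on */
    printf("capacity=%zu a[256]=%d\n", a.capacity, a.items[256]);
    free(a.items);
    return 0;
}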
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void QuotaManager::GetAvailableSpace(const AvailableSpaceCallback& callback) { if (is_incognito_) { callback.Run(kQuotaStatusOk, kIncognitoDefaultTemporaryQuota); return; } make_scoped_refptr(new AvailableSpaceQueryTask(this, callback))->Start(); } Commit Message: Wipe out QuotaThreadTask. This is a one of a series of refactoring patches for QuotaManager. http://codereview.chromium.org/10872054/ http://codereview.chromium.org/10917060/ BUG=139270 Review URL: https://chromiumcodereview.appspot.com/10919070 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@154987 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
void QuotaManager::GetAvailableSpace(const AvailableSpaceCallback& callback) { if (is_incognito_) { callback.Run(kQuotaStatusOk, kIncognitoDefaultTemporaryQuota); return; } PostTaskAndReplyWithResult( db_thread_, FROM_HERE, base::Bind(get_disk_space_fn_, profile_path_), base::Bind(&QuotaManager::DidGetAvailableSpace, weak_factory_.GetWeakPtr(), callback)); }
170,669
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: parse_rock_ridge_inode_internal(struct iso_directory_record *de, struct inode *inode, int regard_xa) { int symlink_len = 0; int cnt, sig; struct inode *reloc; struct rock_ridge *rr; int rootflag; struct rock_state rs; int ret = 0; if (!ISOFS_SB(inode->i_sb)->s_rock) return 0; init_rock_state(&rs, inode); setup_rock_ridge(de, inode, &rs); if (regard_xa) { rs.chr += 14; rs.len -= 14; if (rs.len < 0) rs.len = 0; } repeat: while (rs.len > 2) { /* There may be one byte for padding somewhere */ rr = (struct rock_ridge *)rs.chr; /* * Ignore rock ridge info if rr->len is out of range, but * don't return -EIO because that would make the file * invisible. */ if (rr->len < 3) goto out; /* Something got screwed up here */ sig = isonum_721(rs.chr); if (rock_check_overflow(&rs, sig)) goto eio; rs.chr += rr->len; rs.len -= rr->len; /* * As above, just ignore the rock ridge info if rr->len * is bogus. */ if (rs.len < 0) goto out; /* Something got screwed up here */ switch (sig) { #ifndef CONFIG_ZISOFS /* No flag for SF or ZF */ case SIG('R', 'R'): if ((rr->u.RR.flags[0] & (RR_PX | RR_TF | RR_SL | RR_CL)) == 0) goto out; break; #endif case SIG('S', 'P'): if (check_sp(rr, inode)) goto out; break; case SIG('C', 'E'): rs.cont_extent = isonum_733(rr->u.CE.extent); rs.cont_offset = isonum_733(rr->u.CE.offset); rs.cont_size = isonum_733(rr->u.CE.size); break; case SIG('E', 'R'): ISOFS_SB(inode->i_sb)->s_rock = 1; printk(KERN_DEBUG "ISO 9660 Extensions: "); { int p; for (p = 0; p < rr->u.ER.len_id; p++) printk("%c", rr->u.ER.data[p]); } printk("\n"); break; case SIG('P', 'X'): inode->i_mode = isonum_733(rr->u.PX.mode); set_nlink(inode, isonum_733(rr->u.PX.n_links)); i_uid_write(inode, isonum_733(rr->u.PX.uid)); i_gid_write(inode, isonum_733(rr->u.PX.gid)); break; case SIG('P', 'N'): { int high, low; high = isonum_733(rr->u.PN.dev_high); low = isonum_733(rr->u.PN.dev_low); /* * The Rock Ridge standard specifies that if * sizeof(dev_t) <= 4, then the high field is * unused, and the device number is completely * stored in the low field. Some writers may * ignore this subtlety, * and as a result we test to see if the entire * device number is * stored in the low field, and use that. */ if ((low & ~0xff) && high == 0) { inode->i_rdev = MKDEV(low >> 8, low & 0xff); } else { inode->i_rdev = MKDEV(high, low); } } break; case SIG('T', 'F'): /* * Some RRIP writers incorrectly place ctime in the * TF_CREATE field. Try to handle this correctly for * either case. 
*/ /* Rock ridge never appears on a High Sierra disk */ cnt = 0; if (rr->u.TF.flags & TF_CREATE) { inode->i_ctime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_ctime.tv_nsec = 0; } if (rr->u.TF.flags & TF_MODIFY) { inode->i_mtime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_mtime.tv_nsec = 0; } if (rr->u.TF.flags & TF_ACCESS) { inode->i_atime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_atime.tv_nsec = 0; } if (rr->u.TF.flags & TF_ATTRIBUTES) { inode->i_ctime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_ctime.tv_nsec = 0; } break; case SIG('S', 'L'): { int slen; struct SL_component *slp; struct SL_component *oldslp; slen = rr->len - 5; slp = &rr->u.SL.link; inode->i_size = symlink_len; while (slen > 1) { rootflag = 0; switch (slp->flags & ~1) { case 0: inode->i_size += slp->len; break; case 2: inode->i_size += 1; break; case 4: inode->i_size += 2; break; case 8: rootflag = 1; inode->i_size += 1; break; default: printk("Symlink component flag " "not implemented\n"); } slen -= slp->len + 2; oldslp = slp; slp = (struct SL_component *) (((char *)slp) + slp->len + 2); if (slen < 2) { if (((rr->u.SL. flags & 1) != 0) && ((oldslp-> flags & 1) == 0)) inode->i_size += 1; break; } /* * If this component record isn't * continued, then append a '/'. */ if (!rootflag && (oldslp->flags & 1) == 0) inode->i_size += 1; } } symlink_len = inode->i_size; break; case SIG('R', 'E'): printk(KERN_WARNING "Attempt to read inode for " "relocated directory\n"); goto out; case SIG('C', 'L'): ISOFS_I(inode)->i_first_extent = isonum_733(rr->u.CL.location); reloc = isofs_iget(inode->i_sb, ISOFS_I(inode)->i_first_extent, 0); if (IS_ERR(reloc)) { ret = PTR_ERR(reloc); goto out; } inode->i_mode = reloc->i_mode; set_nlink(inode, reloc->i_nlink); inode->i_uid = reloc->i_uid; inode->i_gid = reloc->i_gid; inode->i_rdev = reloc->i_rdev; inode->i_size = reloc->i_size; inode->i_blocks = reloc->i_blocks; inode->i_atime = reloc->i_atime; inode->i_ctime = reloc->i_ctime; inode->i_mtime = reloc->i_mtime; iput(reloc); break; #ifdef CONFIG_ZISOFS case SIG('Z', 'F'): { int algo; if (ISOFS_SB(inode->i_sb)->s_nocompress) break; algo = isonum_721(rr->u.ZF.algorithm); if (algo == SIG('p', 'z')) { int block_shift = isonum_711(&rr->u.ZF.parms[1]); if (block_shift > 17) { printk(KERN_WARNING "isofs: " "Can't handle ZF block " "size of 2^%d\n", block_shift); } else { /* * Note: we don't change * i_blocks here */ ISOFS_I(inode)->i_file_format = isofs_file_compressed; /* * Parameters to compression * algorithm (header size, * block size) */ ISOFS_I(inode)->i_format_parm[0] = isonum_711(&rr->u.ZF.parms[0]); ISOFS_I(inode)->i_format_parm[1] = isonum_711(&rr->u.ZF.parms[1]); inode->i_size = isonum_733(rr->u.ZF. real_size); } } else { printk(KERN_WARNING "isofs: Unknown ZF compression " "algorithm: %c%c\n", rr->u.ZF.algorithm[0], rr->u.ZF.algorithm[1]); } break; } #endif default: break; } } ret = rock_continue(&rs); if (ret == 0) goto repeat; if (ret == 1) ret = 0; out: kfree(rs.buffer); return ret; eio: ret = -EIO; goto out; } Commit Message: isofs: Fix unbounded recursion when processing relocated directories We did not check relocated directory in any way when processing Rock Ridge 'CL' tag. Thus a corrupted isofs image can possibly have a CL entry pointing to another CL entry leading to possibly unbounded recursion in kernel code and thus stack overflow or deadlocks (if there is a loop created from CL entries). 
Fix the problem by not allowing CL entry to point to a directory entry with CL entry (such use makes no good sense anyway) and by checking whether CL entry doesn't point to itself. CC: [email protected] Reported-by: Chris Evans <[email protected]> Signed-off-by: Jan Kara <[email protected]> CWE ID: CWE-20
parse_rock_ridge_inode_internal(struct iso_directory_record *de, struct inode *inode, int flags) { int symlink_len = 0; int cnt, sig; unsigned int reloc_block; struct inode *reloc; struct rock_ridge *rr; int rootflag; struct rock_state rs; int ret = 0; if (!ISOFS_SB(inode->i_sb)->s_rock) return 0; init_rock_state(&rs, inode); setup_rock_ridge(de, inode, &rs); if (flags & RR_REGARD_XA) { rs.chr += 14; rs.len -= 14; if (rs.len < 0) rs.len = 0; } repeat: while (rs.len > 2) { /* There may be one byte for padding somewhere */ rr = (struct rock_ridge *)rs.chr; /* * Ignore rock ridge info if rr->len is out of range, but * don't return -EIO because that would make the file * invisible. */ if (rr->len < 3) goto out; /* Something got screwed up here */ sig = isonum_721(rs.chr); if (rock_check_overflow(&rs, sig)) goto eio; rs.chr += rr->len; rs.len -= rr->len; /* * As above, just ignore the rock ridge info if rr->len * is bogus. */ if (rs.len < 0) goto out; /* Something got screwed up here */ switch (sig) { #ifndef CONFIG_ZISOFS /* No flag for SF or ZF */ case SIG('R', 'R'): if ((rr->u.RR.flags[0] & (RR_PX | RR_TF | RR_SL | RR_CL)) == 0) goto out; break; #endif case SIG('S', 'P'): if (check_sp(rr, inode)) goto out; break; case SIG('C', 'E'): rs.cont_extent = isonum_733(rr->u.CE.extent); rs.cont_offset = isonum_733(rr->u.CE.offset); rs.cont_size = isonum_733(rr->u.CE.size); break; case SIG('E', 'R'): ISOFS_SB(inode->i_sb)->s_rock = 1; printk(KERN_DEBUG "ISO 9660 Extensions: "); { int p; for (p = 0; p < rr->u.ER.len_id; p++) printk("%c", rr->u.ER.data[p]); } printk("\n"); break; case SIG('P', 'X'): inode->i_mode = isonum_733(rr->u.PX.mode); set_nlink(inode, isonum_733(rr->u.PX.n_links)); i_uid_write(inode, isonum_733(rr->u.PX.uid)); i_gid_write(inode, isonum_733(rr->u.PX.gid)); break; case SIG('P', 'N'): { int high, low; high = isonum_733(rr->u.PN.dev_high); low = isonum_733(rr->u.PN.dev_low); /* * The Rock Ridge standard specifies that if * sizeof(dev_t) <= 4, then the high field is * unused, and the device number is completely * stored in the low field. Some writers may * ignore this subtlety, * and as a result we test to see if the entire * device number is * stored in the low field, and use that. */ if ((low & ~0xff) && high == 0) { inode->i_rdev = MKDEV(low >> 8, low & 0xff); } else { inode->i_rdev = MKDEV(high, low); } } break; case SIG('T', 'F'): /* * Some RRIP writers incorrectly place ctime in the * TF_CREATE field. Try to handle this correctly for * either case. 
*/ /* Rock ridge never appears on a High Sierra disk */ cnt = 0; if (rr->u.TF.flags & TF_CREATE) { inode->i_ctime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_ctime.tv_nsec = 0; } if (rr->u.TF.flags & TF_MODIFY) { inode->i_mtime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_mtime.tv_nsec = 0; } if (rr->u.TF.flags & TF_ACCESS) { inode->i_atime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_atime.tv_nsec = 0; } if (rr->u.TF.flags & TF_ATTRIBUTES) { inode->i_ctime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_ctime.tv_nsec = 0; } break; case SIG('S', 'L'): { int slen; struct SL_component *slp; struct SL_component *oldslp; slen = rr->len - 5; slp = &rr->u.SL.link; inode->i_size = symlink_len; while (slen > 1) { rootflag = 0; switch (slp->flags & ~1) { case 0: inode->i_size += slp->len; break; case 2: inode->i_size += 1; break; case 4: inode->i_size += 2; break; case 8: rootflag = 1; inode->i_size += 1; break; default: printk("Symlink component flag " "not implemented\n"); } slen -= slp->len + 2; oldslp = slp; slp = (struct SL_component *) (((char *)slp) + slp->len + 2); if (slen < 2) { if (((rr->u.SL. flags & 1) != 0) && ((oldslp-> flags & 1) == 0)) inode->i_size += 1; break; } /* * If this component record isn't * continued, then append a '/'. */ if (!rootflag && (oldslp->flags & 1) == 0) inode->i_size += 1; } } symlink_len = inode->i_size; break; case SIG('R', 'E'): printk(KERN_WARNING "Attempt to read inode for " "relocated directory\n"); goto out; case SIG('C', 'L'): if (flags & RR_RELOC_DE) { printk(KERN_ERR "ISOFS: Recursive directory relocation " "is not supported\n"); goto eio; } reloc_block = isonum_733(rr->u.CL.location); if (reloc_block == ISOFS_I(inode)->i_iget5_block && ISOFS_I(inode)->i_iget5_offset == 0) { printk(KERN_ERR "ISOFS: Directory relocation points to " "itself\n"); goto eio; } ISOFS_I(inode)->i_first_extent = reloc_block; reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0); if (IS_ERR(reloc)) { ret = PTR_ERR(reloc); goto out; } inode->i_mode = reloc->i_mode; set_nlink(inode, reloc->i_nlink); inode->i_uid = reloc->i_uid; inode->i_gid = reloc->i_gid; inode->i_rdev = reloc->i_rdev; inode->i_size = reloc->i_size; inode->i_blocks = reloc->i_blocks; inode->i_atime = reloc->i_atime; inode->i_ctime = reloc->i_ctime; inode->i_mtime = reloc->i_mtime; iput(reloc); break; #ifdef CONFIG_ZISOFS case SIG('Z', 'F'): { int algo; if (ISOFS_SB(inode->i_sb)->s_nocompress) break; algo = isonum_721(rr->u.ZF.algorithm); if (algo == SIG('p', 'z')) { int block_shift = isonum_711(&rr->u.ZF.parms[1]); if (block_shift > 17) { printk(KERN_WARNING "isofs: " "Can't handle ZF block " "size of 2^%d\n", block_shift); } else { /* * Note: we don't change * i_blocks here */ ISOFS_I(inode)->i_file_format = isofs_file_compressed; /* * Parameters to compression * algorithm (header size, * block size) */ ISOFS_I(inode)->i_format_parm[0] = isonum_711(&rr->u.ZF.parms[0]); ISOFS_I(inode)->i_format_parm[1] = isonum_711(&rr->u.ZF.parms[1]); inode->i_size = isonum_733(rr->u.ZF. real_size); } } else { printk(KERN_WARNING "isofs: Unknown ZF compression " "algorithm: %c%c\n", rr->u.ZF.algorithm[0], rr->u.ZF.algorithm[1]); } break; } #endif default: break; } } ret = rock_continue(&rs); if (ret == 0) goto repeat; if (ret == 1) ret = 0; out: kfree(rs.buffer); return ret; eio: ret = -EIO; goto out; }
166,271
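The isofs fix above breaks CL-entry loops in two ways: a directory reached through relocation may not itself be relocated again (the RR_RELOC_DE flag), and a CL entry pointing back at its own block is rejected. The same two guards on a toy relocation table (invented data layout, illustration only):

/* Toy model of relocation-entry chasing with the two guards from the fix:
 * no second hop, and no self-reference. */
#include <stdio.h>

#define RELOC_NONE (-1)

struct toy_dirent {
    int block;      /* where this directory entry lives */
    int reloc_to;   /* CL-style relocation target, or RELOC_NONE */
};

/* Follow at most one relocation hop; return the final block or -1 on error. */
static int resolve(const struct toy_dirent *e, int already_relocated)
{
    if (e->reloc_to == RELOC_NONE)
        return e->block;
    if (already_relocated) {
        fprintf(stderr, "recursive relocation rejected\n");
        return -1;
    }
    if (e->reloc_to == e->block) {
        fprintf(stderr, "self-referencing relocation rejected\n");
        return -1;
    }
    return e->reloc_to;   /* caller re-reads the target with already_relocated = 1 */
}

int main(void)
{
    struct toy_dirent ok   = { .block = 10, .reloc_to = 42 };
    struct toy_dirent self = { .block = 10, .reloc_to = 10 };

    printf("ok    -> %d\n", resolve(&ok, 0));
    printf("self  -> %d\n", resolve(&self, 0));
    printf("hop 2 -> %d\n", resolve(&ok, 1));   /* a relocated dir relocating again */
    return 0;
}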
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void InspectorNetworkAgent::WillSendRequest( ExecutionContext* execution_context, unsigned long identifier, DocumentLoader* loader, ResourceRequest& request, const ResourceResponse& redirect_response, const FetchInitiatorInfo& initiator_info) { if (initiator_info.name == FetchInitiatorTypeNames::internal) return; if (initiator_info.name == FetchInitiatorTypeNames::document && loader->GetSubstituteData().IsValid()) return; protocol::DictionaryValue* headers = state_->getObject(NetworkAgentState::kExtraRequestHeaders); if (headers) { for (size_t i = 0; i < headers->size(); ++i) { auto header = headers->at(i); String value; if (header.second->asString(&value)) request.SetHTTPHeaderField(AtomicString(header.first), AtomicString(value)); } } request.SetReportRawHeaders(true); if (state_->booleanProperty(NetworkAgentState::kCacheDisabled, false)) { if (LoadsFromCacheOnly(request) && request.GetRequestContext() != WebURLRequest::kRequestContextInternal) { request.SetCachePolicy(WebCachePolicy::kBypassCacheLoadOnlyFromCache); } else { request.SetCachePolicy(WebCachePolicy::kBypassingCache); } request.SetShouldResetAppCache(true); } if (state_->booleanProperty(NetworkAgentState::kBypassServiceWorker, false)) request.SetServiceWorkerMode(WebURLRequest::ServiceWorkerMode::kNone); WillSendRequestInternal(execution_context, identifier, loader, request, redirect_response, initiator_info); if (!host_id_.IsEmpty()) { request.AddHTTPHeaderField( HTTPNames::X_DevTools_Emulate_Network_Conditions_Client_Id, AtomicString(host_id_)); } } Commit Message: DevTools: send proper resource type in Network.RequestWillBeSent This patch plumbs resoure type into the DispatchWillSendRequest instrumenation. This allows us to report accurate type in Network.RequestWillBeSent event, instead of "Other", that we report today. BUG=765501 R=dgozman Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c Reviewed-on: https://chromium-review.googlesource.com/667504 Reviewed-by: Pavel Feldman <[email protected]> Reviewed-by: Dmitry Gozman <[email protected]> Commit-Queue: Andrey Lushnikov <[email protected]> Cr-Commit-Position: refs/heads/master@{#507936} CWE ID: CWE-119
void InspectorNetworkAgent::WillSendRequest( ExecutionContext* execution_context, unsigned long identifier, DocumentLoader* loader, ResourceRequest& request, const ResourceResponse& redirect_response, const FetchInitiatorInfo& initiator_info, Resource::Type resource_type) { if (initiator_info.name == FetchInitiatorTypeNames::internal) return; if (initiator_info.name == FetchInitiatorTypeNames::document && loader->GetSubstituteData().IsValid()) return; protocol::DictionaryValue* headers = state_->getObject(NetworkAgentState::kExtraRequestHeaders); if (headers) { for (size_t i = 0; i < headers->size(); ++i) { auto header = headers->at(i); String value; if (header.second->asString(&value)) request.SetHTTPHeaderField(AtomicString(header.first), AtomicString(value)); } } request.SetReportRawHeaders(true); if (state_->booleanProperty(NetworkAgentState::kCacheDisabled, false)) { if (LoadsFromCacheOnly(request) && request.GetRequestContext() != WebURLRequest::kRequestContextInternal) { request.SetCachePolicy(WebCachePolicy::kBypassCacheLoadOnlyFromCache); } else { request.SetCachePolicy(WebCachePolicy::kBypassingCache); } request.SetShouldResetAppCache(true); } if (state_->booleanProperty(NetworkAgentState::kBypassServiceWorker, false)) request.SetServiceWorkerMode(WebURLRequest::ServiceWorkerMode::kNone); InspectorPageAgent::ResourceType type = InspectorPageAgent::ToResourceType(resource_type); WillSendRequestInternal(execution_context, identifier, loader, request, redirect_response, initiator_info, type); if (!host_id_.IsEmpty()) { request.AddHTTPHeaderField( HTTPNames::X_DevTools_Emulate_Network_Conditions_Client_Id, AtomicString(host_id_)); } }
172,467
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static bool tight_can_send_png_rect(VncState *vs, int w, int h) { if (vs->tight.type != VNC_ENCODING_TIGHT_PNG) { return false; } if (ds_get_bytes_per_pixel(vs->ds) == 1 || vs->clientds.pf.bytes_per_pixel == 1) { return false; } return true; } Commit Message: CWE ID: CWE-125
static bool tight_can_send_png_rect(VncState *vs, int w, int h) { if (vs->tight.type != VNC_ENCODING_TIGHT_PNG) { return false; } if (ds_get_bytes_per_pixel(vs->ds) == 1 || vs->client_pf.bytes_per_pixel == 1) { return false; } return true; }
165,463
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static PassRefPtrWillBeRawPtr<CreateFileResult> create() { return adoptRefWillBeNoop(new CreateFileResult()); } Commit Message: Oilpan: Ship Oilpan for SyncCallbackHelper, CreateFileResult and CallbackWrapper in filesystem/ These are leftovers when we shipped Oilpan for filesystem/ once. BUG=340522 Review URL: https://codereview.chromium.org/501263003 git-svn-id: svn://svn.chromium.org/blink/trunk@180909 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
static CreateFileResult* create() { return new CreateFileResult(); }
171,413
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void *arm_coherent_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); void *memory; if (dma_alloc_from_coherent(dev, size, handle, &memory)) return memory; return __dma_alloc(dev, size, handle, gfp, prot, true, __builtin_return_address(0)); } Commit Message: ARM: dma-mapping: don't allow DMA mappings to be marked executable DMA mapping permissions were being derived from pgprot_kernel directly without using PAGE_KERNEL. This causes them to be marked with executable permission, which is not what we want. Fix this. Signed-off-by: Russell King <[email protected]> CWE ID: CWE-264
static void *arm_coherent_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); void *memory; if (dma_alloc_from_coherent(dev, size, handle, &memory)) return memory; return __dma_alloc(dev, size, handle, gfp, prot, true, __builtin_return_address(0)); }
167,577
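The one-word change above (pgprot_kernel to PAGE_KERNEL) is about execute permission: deriving the mapping protection from pgprot_kernel left DMA buffers executable, while PAGE_KERNEL carries the no-execute bit. A rough userspace analogue of that W^X rule using plain POSIX mmap (nothing here is the kernel API):

/* Userspace analogue of the fix's intent: request RW, never RWX, for a
 * plain data buffer. POSIX mmap, nothing ARM- or DMA-specific. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;

    /* Correct: data buffers get PROT_READ | PROT_WRITE only. */
    void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    memset(buf, 0xA5, len);
    printf("rw mapping at %p, first byte 0x%02x\n", buf, ((unsigned char *)buf)[0]);

    /* The bug was the moral equivalent of also passing PROT_EXEC here:
     * nothing crashes, but writable+executable memory helps attackers. */
    munmap(buf, len);
    return 0;
}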
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int gdTransformAffineGetImage(gdImagePtr *dst, const gdImagePtr src, gdRectPtr src_area, const double affine[6]) { int res; double m[6]; gdRect bbox; gdRect area_full; if (src_area == NULL) { area_full.x = 0; area_full.y = 0; area_full.width = gdImageSX(src); area_full.height = gdImageSY(src); src_area = &area_full; } gdTransformAffineBoundingBox(src_area, affine, &bbox); *dst = gdImageCreateTrueColor(bbox.width, bbox.height); if (*dst == NULL) { return GD_FALSE; } (*dst)->saveAlphaFlag = 1; if (!src->trueColor) { gdImagePaletteToTrueColor(src); } /* Translate to dst origin (0,0) */ gdAffineTranslate(m, -bbox.x, -bbox.y); gdAffineConcat(m, affine, m); gdImageAlphaBlending(*dst, 0); res = gdTransformAffineCopy(*dst, 0,0, src, src_area, m); if (res != GD_TRUE) { gdImageDestroy(*dst); dst = NULL; return GD_FALSE; } else { return GD_TRUE; } } Commit Message: Fixed bug #72227: imagescale out-of-bounds read Ported from https://github.com/libgd/libgd/commit/4f65a3e4eedaffa1efcf9ee1eb08f0b504fbc31a CWE ID: CWE-125
int gdTransformAffineGetImage(gdImagePtr *dst, const gdImagePtr src, gdRectPtr src_area, const double affine[6]) { int res; double m[6]; gdRect bbox; gdRect area_full; if (src_area == NULL) { area_full.x = 0; area_full.y = 0; area_full.width = gdImageSX(src); area_full.height = gdImageSY(src); src_area = &area_full; } gdTransformAffineBoundingBox(src_area, affine, &bbox); *dst = gdImageCreateTrueColor(bbox.width, bbox.height); if (*dst == NULL) { return GD_FALSE; } (*dst)->saveAlphaFlag = 1; if (!src->trueColor) { gdImagePaletteToTrueColor(src); } /* Translate to dst origin (0,0) */ gdAffineTranslate(m, -bbox.x, -bbox.y); gdAffineConcat(m, affine, m); gdImageAlphaBlending(*dst, 0); res = gdTransformAffineCopy(*dst, 0,0, src, src_area, m); if (res != GD_TRUE) { gdImageDestroy(*dst); dst = NULL; return GD_FALSE; } else { return GD_TRUE; } }
170,007
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: const CuePoint* Cues::GetFirst() const { if (m_cue_points == NULL) return NULL; if (m_count == 0) return NULL; #if 0 LoadCuePoint(); //init cues const size_t count = m_count + m_preload_count; if (count == 0) //weird return NULL; #endif CuePoint* const* const pp = m_cue_points; assert(pp); CuePoint* const pCP = pp[0]; assert(pCP); assert(pCP->GetTimeCode() >= 0); return pCP; } Commit Message: external/libvpx/libwebm: Update snapshot Update libwebm snapshot. This update contains security fixes from upstream. Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b BUG=23167726 Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207 (cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a) CWE ID: CWE-20
const CuePoint* Cues::GetFirst() const { if (m_cue_points == NULL || m_count == 0) return NULL; CuePoint* const* const pp = m_cue_points; if (pp == NULL) return NULL; CuePoint* const pCP = pp[0]; if (pCP == NULL || pCP->GetTimeCode() < 0) return NULL; return pCP; }
173,818
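The libwebm change above replaces assert() on parser state with explicit NULL and range checks, because asserts are compiled out under NDEBUG and the values come from untrusted container files. A short demo of that distinction (toy types, not libwebm's):

/* Why the fix swaps assert() for real checks: with -DNDEBUG the assert
 * disappears entirely, so it cannot guard untrusted input. */
#include <assert.h>
#include <stdio.h>

struct cue_point { long timecode; };

/* Debug-only belief about the data: gone when NDEBUG is defined. */
static const struct cue_point *get_first_assert(const struct cue_point *const *pp,
                                                long count)
{
    assert(pp != NULL && count > 0);
    return pp[0];
}

/* Release-safe version: the checks survive every build configuration. */
static const struct cue_point *get_first_checked(const struct cue_point *const *pp,
                                                 long count)
{
    if (pp == NULL || count <= 0)
        return NULL;
    if (pp[0] == NULL || pp[0]->timecode < 0)
        return NULL;
    return pp[0];
}

int main(void)
{
    struct cue_point cp = { 1000 };
    const struct cue_point *list[] = { &cp };

    printf("assert,  good input: %p\n", (const void *)get_first_assert(list, 1));
    printf("checked, good input: %p\n", (const void *)get_first_checked(list, 1));
    printf("checked, bad input : %p\n", (const void *)get_first_checked(NULL, 0));
    /* get_first_assert(NULL, 0) would abort in a debug build, but in a
     * release build (-DNDEBUG) it would dereference NULL instead. */
    return 0;
}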
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int lxc_attach(const char* name, const char* lxcpath, lxc_attach_exec_t exec_function, void* exec_payload, lxc_attach_options_t* options, pid_t* attached_process) { int ret, status; pid_t init_pid, pid, attached_pid, expected; struct lxc_proc_context_info *init_ctx; char* cwd; char* new_cwd; int ipc_sockets[2]; int procfd; signed long personality; if (!options) options = &attach_static_default_options; init_pid = lxc_cmd_get_init_pid(name, lxcpath); if (init_pid < 0) { ERROR("failed to get the init pid"); return -1; } init_ctx = lxc_proc_get_context_info(init_pid); if (!init_ctx) { ERROR("failed to get context of the init process, pid = %ld", (long)init_pid); return -1; } personality = get_personality(name, lxcpath); if (init_ctx->personality < 0) { ERROR("Failed to get personality of the container"); lxc_proc_put_context_info(init_ctx); return -1; } init_ctx->personality = personality; init_ctx->container = lxc_container_new(name, lxcpath); if (!init_ctx->container) return -1; if (!fetch_seccomp(init_ctx->container, options)) WARN("Failed to get seccomp policy"); if (!no_new_privs(init_ctx->container, options)) WARN("Could not determine whether PR_SET_NO_NEW_PRIVS is set."); cwd = getcwd(NULL, 0); /* determine which namespaces the container was created with * by asking lxc-start, if necessary */ if (options->namespaces == -1) { options->namespaces = lxc_cmd_get_clone_flags(name, lxcpath); /* call failed */ if (options->namespaces == -1) { ERROR("failed to automatically determine the " "namespaces which the container unshared"); free(cwd); lxc_proc_put_context_info(init_ctx); return -1; } } /* create a socket pair for IPC communication; set SOCK_CLOEXEC in order * to make sure we don't irritate other threads that want to fork+exec away * * IMPORTANT: if the initial process is multithreaded and another call * just fork()s away without exec'ing directly after, the socket fd will * exist in the forked process from the other thread and any close() in * our own child process will not really cause the socket to close properly, * potentiall causing the parent to hang. * * For this reason, while IPC is still active, we have to use shutdown() * if the child exits prematurely in order to signal that the socket * is closed and cannot assume that the child exiting will automatically * do that. * * IPC mechanism: (X is receiver) * initial process intermediate attached * X <--- send pid of * attached proc, * then exit * send 0 ------------------------------------> X * [do initialization] * X <------------------------------------ send 1 * [add to cgroup, ...] * send 2 ------------------------------------> X * close socket close socket * run program */ ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets); if (ret < 0) { SYSERROR("could not set up required IPC mechanism for attaching"); free(cwd); lxc_proc_put_context_info(init_ctx); return -1; } /* create intermediate subprocess, three reasons: * 1. runs all pthread_atfork handlers and the * child will no longer be threaded * (we can't properly setns() in a threaded process) * 2. we can't setns() in the child itself, since * we want to make sure we are properly attached to * the pidns * 3. 
also, the initial thread has to put the attached * process into the cgroup, which we can only do if * we didn't already setns() (otherwise, user * namespaces will hate us) */ pid = fork(); if (pid < 0) { SYSERROR("failed to create first subprocess"); free(cwd); lxc_proc_put_context_info(init_ctx); return -1; } if (pid) { pid_t to_cleanup_pid = pid; /* initial thread, we close the socket that is for the * subprocesses */ close(ipc_sockets[1]); free(cwd); /* attach to cgroup, if requested */ if (options->attach_flags & LXC_ATTACH_MOVE_TO_CGROUP) { if (!cgroup_attach(name, lxcpath, pid)) goto cleanup_error; } /* Let the child process know to go ahead */ status = 0; ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status)); if (ret <= 0) { ERROR("error using IPC to notify attached process for initialization (0)"); goto cleanup_error; } /* get pid from intermediate process */ ret = lxc_read_nointr_expect(ipc_sockets[0], &attached_pid, sizeof(attached_pid), NULL); if (ret <= 0) { if (ret != 0) ERROR("error using IPC to receive pid of attached process"); goto cleanup_error; } /* ignore SIGKILL (CTRL-C) and SIGQUIT (CTRL-\) - issue #313 */ if (options->stdin_fd == 0) { signal(SIGINT, SIG_IGN); signal(SIGQUIT, SIG_IGN); } /* reap intermediate process */ ret = wait_for_pid(pid); if (ret < 0) goto cleanup_error; /* we will always have to reap the grandchild now */ to_cleanup_pid = attached_pid; /* tell attached process it may start initializing */ status = 0; ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status)); if (ret <= 0) { ERROR("error using IPC to notify attached process for initialization (0)"); goto cleanup_error; } /* wait for the attached process to finish initializing */ expected = 1; ret = lxc_read_nointr_expect(ipc_sockets[0], &status, sizeof(status), &expected); if (ret <= 0) { if (ret != 0) ERROR("error using IPC to receive notification from attached process (1)"); goto cleanup_error; } /* tell attached process we're done */ status = 2; ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status)); if (ret <= 0) { ERROR("error using IPC to notify attached process for initialization (2)"); goto cleanup_error; } /* now shut down communication with child, we're done */ shutdown(ipc_sockets[0], SHUT_RDWR); close(ipc_sockets[0]); lxc_proc_put_context_info(init_ctx); /* we're done, the child process should now execute whatever * it is that the user requested. The parent can now track it * with waitpid() or similar. 
*/ *attached_process = attached_pid; return 0; cleanup_error: /* first shut down the socket, then wait for the pid, * otherwise the pid we're waiting for may never exit */ shutdown(ipc_sockets[0], SHUT_RDWR); close(ipc_sockets[0]); if (to_cleanup_pid) (void) wait_for_pid(to_cleanup_pid); lxc_proc_put_context_info(init_ctx); return -1; } /* first subprocess begins here, we close the socket that is for the * initial thread */ close(ipc_sockets[0]); /* Wait for the parent to have setup cgroups */ expected = 0; status = -1; ret = lxc_read_nointr_expect(ipc_sockets[1], &status, sizeof(status), &expected); if (ret <= 0) { ERROR("error communicating with child process"); shutdown(ipc_sockets[1], SHUT_RDWR); rexit(-1); } if ((options->attach_flags & LXC_ATTACH_MOVE_TO_CGROUP) && cgns_supported()) options->namespaces |= CLONE_NEWCGROUP; procfd = open("/proc", O_DIRECTORY | O_RDONLY); if (procfd < 0) { SYSERROR("Unable to open /proc"); shutdown(ipc_sockets[1], SHUT_RDWR); rexit(-1); } /* attach now, create another subprocess later, since pid namespaces * only really affect the children of the current process */ ret = lxc_attach_to_ns(init_pid, options->namespaces); if (ret < 0) { ERROR("failed to enter the namespace"); shutdown(ipc_sockets[1], SHUT_RDWR); rexit(-1); } /* attach succeeded, try to cwd */ if (options->initial_cwd) new_cwd = options->initial_cwd; else new_cwd = cwd; ret = chdir(new_cwd); if (ret < 0) WARN("could not change directory to '%s'", new_cwd); free(cwd); /* now create the real child process */ { struct attach_clone_payload payload = { .ipc_socket = ipc_sockets[1], .options = options, .init_ctx = init_ctx, .exec_function = exec_function, .exec_payload = exec_payload, .procfd = procfd }; /* We use clone_parent here to make this subprocess a direct child of * the initial process. Then this intermediate process can exit and * the parent can directly track the attached process. */ pid = lxc_clone(attach_child_main, &payload, CLONE_PARENT); } /* shouldn't happen, clone() should always return positive pid */ if (pid <= 0) { SYSERROR("failed to create subprocess"); shutdown(ipc_sockets[1], SHUT_RDWR); rexit(-1); } /* tell grandparent the pid of the pid of the newly created child */ ret = lxc_write_nointr(ipc_sockets[1], &pid, sizeof(pid)); if (ret != sizeof(pid)) { /* if this really happens here, this is very unfortunate, since the * parent will not know the pid of the attached process and will * not be able to wait for it (and we won't either due to CLONE_PARENT) * so the parent won't be able to reap it and the attached process * will remain a zombie */ ERROR("error using IPC to notify main process of pid of the attached process"); shutdown(ipc_sockets[1], SHUT_RDWR); rexit(-1); } /* the rest is in the hands of the initial and the attached process */ rexit(0); } Commit Message: attach: do not send procfd to attached process So far, we opened a file descriptor refering to proc on the host inside the host namespace and handed that fd to the attached process in attach_child_main(). This was done to ensure that LSM labels were correctly setup. However, by exploiting a potential kernel bug, ptrace could be used to prevent the file descriptor from being closed which in turn could be used by an unprivileged container to gain access to the host namespace. Aside from this needing an upstream kernel fix, we should make sure that we don't pass the fd for proc itself to the attached process. 
However, we cannot completely prevent this, as the attached process needs to be able to change its apparmor profile by writing to /proc/self/attr/exec or /proc/self/attr/current. To minimize the attack surface, we only send the fd for /proc/self/attr/exec or /proc/self/attr/current to the attached process. To do this we introduce a little more IPC between the child and parent: * IPC mechanism: (X is receiver) * initial process intermediate attached * X <--- send pid of * attached proc, * then exit * send 0 ------------------------------------> X * [do initialization] * X <------------------------------------ send 1 * [add to cgroup, ...] * send 2 ------------------------------------> X * [set LXC_ATTACH_NO_NEW_PRIVS] * X <------------------------------------ send 3 * [open LSM label fd] * send 4 ------------------------------------> X * [set LSM label] * close socket close socket * run program The attached child tells the parent when it is ready to have its LSM labels set up. The parent then opens an approriate fd for the child PID to /proc/<pid>/attr/exec or /proc/<pid>/attr/current and sends it via SCM_RIGHTS to the child. The child can then set its LSM laben. Both sides then close the socket fds and the child execs the requested process. Signed-off-by: Christian Brauner <[email protected]> CWE ID: CWE-264
int lxc_attach(const char* name, const char* lxcpath, lxc_attach_exec_t exec_function, void* exec_payload, lxc_attach_options_t* options, pid_t* attached_process) { int ret, status; pid_t init_pid, pid, attached_pid, expected; struct lxc_proc_context_info *init_ctx; char* cwd; char* new_cwd; int ipc_sockets[2]; signed long personality; if (!options) options = &attach_static_default_options; init_pid = lxc_cmd_get_init_pid(name, lxcpath); if (init_pid < 0) { ERROR("failed to get the init pid"); return -1; } init_ctx = lxc_proc_get_context_info(init_pid); if (!init_ctx) { ERROR("failed to get context of the init process, pid = %ld", (long)init_pid); return -1; } personality = get_personality(name, lxcpath); if (init_ctx->personality < 0) { ERROR("Failed to get personality of the container"); lxc_proc_put_context_info(init_ctx); return -1; } init_ctx->personality = personality; init_ctx->container = lxc_container_new(name, lxcpath); if (!init_ctx->container) return -1; if (!fetch_seccomp(init_ctx->container, options)) WARN("Failed to get seccomp policy"); if (!no_new_privs(init_ctx->container, options)) WARN("Could not determine whether PR_SET_NO_NEW_PRIVS is set."); cwd = getcwd(NULL, 0); /* determine which namespaces the container was created with * by asking lxc-start, if necessary */ if (options->namespaces == -1) { options->namespaces = lxc_cmd_get_clone_flags(name, lxcpath); /* call failed */ if (options->namespaces == -1) { ERROR("failed to automatically determine the " "namespaces which the container unshared"); free(cwd); lxc_proc_put_context_info(init_ctx); return -1; } } /* create a socket pair for IPC communication; set SOCK_CLOEXEC in order * to make sure we don't irritate other threads that want to fork+exec away * * IMPORTANT: if the initial process is multithreaded and another call * just fork()s away without exec'ing directly after, the socket fd will * exist in the forked process from the other thread and any close() in * our own child process will not really cause the socket to close properly, * potentiall causing the parent to hang. * * For this reason, while IPC is still active, we have to use shutdown() * if the child exits prematurely in order to signal that the socket * is closed and cannot assume that the child exiting will automatically * do that. * * IPC mechanism: (X is receiver) * initial process intermediate attached * X <--- send pid of * attached proc, * then exit * send 0 ------------------------------------> X * [do initialization] * X <------------------------------------ send 1 * [add to cgroup, ...] * send 2 ------------------------------------> X * [set LXC_ATTACH_NO_NEW_PRIVS] * X <------------------------------------ send 3 * [open LSM label fd] * send 4 ------------------------------------> X * [set LSM label] * close socket close socket * run program */ ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets); if (ret < 0) { SYSERROR("could not set up required IPC mechanism for attaching"); free(cwd); lxc_proc_put_context_info(init_ctx); return -1; } /* create intermediate subprocess, three reasons: * 1. runs all pthread_atfork handlers and the * child will no longer be threaded * (we can't properly setns() in a threaded process) * 2. we can't setns() in the child itself, since * we want to make sure we are properly attached to * the pidns * 3. 
also, the initial thread has to put the attached * process into the cgroup, which we can only do if * we didn't already setns() (otherwise, user * namespaces will hate us) */ pid = fork(); if (pid < 0) { SYSERROR("failed to create first subprocess"); free(cwd); lxc_proc_put_context_info(init_ctx); return -1; } if (pid) { int procfd = -1; pid_t to_cleanup_pid = pid; /* initial thread, we close the socket that is for the * subprocesses */ close(ipc_sockets[1]); free(cwd); /* attach to cgroup, if requested */ if (options->attach_flags & LXC_ATTACH_MOVE_TO_CGROUP) { if (!cgroup_attach(name, lxcpath, pid)) goto cleanup_error; } /* Open /proc before setns() to the containers namespace so we * don't rely on any information from inside the container. */ procfd = open("/proc", O_DIRECTORY | O_RDONLY | O_CLOEXEC); if (procfd < 0) { SYSERROR("Unable to open /proc."); goto cleanup_error; } /* Let the child process know to go ahead */ status = 0; ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status)); if (ret <= 0) { ERROR("error using IPC to notify attached process for initialization (0)"); goto cleanup_error; } /* get pid from intermediate process */ ret = lxc_read_nointr_expect(ipc_sockets[0], &attached_pid, sizeof(attached_pid), NULL); if (ret <= 0) { if (ret != 0) ERROR("error using IPC to receive pid of attached process"); goto cleanup_error; } /* ignore SIGKILL (CTRL-C) and SIGQUIT (CTRL-\) - issue #313 */ if (options->stdin_fd == 0) { signal(SIGINT, SIG_IGN); signal(SIGQUIT, SIG_IGN); } /* reap intermediate process */ ret = wait_for_pid(pid); if (ret < 0) goto cleanup_error; /* we will always have to reap the grandchild now */ to_cleanup_pid = attached_pid; /* tell attached process it may start initializing */ status = 0; ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status)); if (ret <= 0) { ERROR("error using IPC to notify attached process for initialization (0)"); goto cleanup_error; } /* wait for the attached process to finish initializing */ expected = 1; ret = lxc_read_nointr_expect(ipc_sockets[0], &status, sizeof(status), &expected); if (ret <= 0) { if (ret != 0) ERROR("error using IPC to receive notification " "from attached process (1)"); goto cleanup_error; } /* tell attached process we're done */ status = 2; ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status)); if (ret <= 0) { ERROR("Error using IPC to notify attached process for " "initialization (2): %s.", strerror(errno)); goto cleanup_error; } /* Wait for the (grand)child to tell us that it's ready to set * up its LSM labels. */ expected = 3; ret = lxc_read_nointr_expect(ipc_sockets[0], &status, sizeof(status), &expected); if (ret <= 0) { ERROR("Error using IPC for the child to tell us to open LSM fd (3): %s.", strerror(errno)); goto cleanup_error; } /* Open LSM fd and send it to child. */ if ((options->namespaces & CLONE_NEWNS) && (options->attach_flags & LXC_ATTACH_LSM) && init_ctx->lsm_label) { int on_exec, labelfd; on_exec = options->attach_flags & LXC_ATTACH_LSM_EXEC ? 1 : 0; /* Open fd for the LSM security module. */ labelfd = lsm_openat(procfd, attached_pid, on_exec); if (labelfd < 0) goto cleanup_error; /* Send child fd of the LSM security module to write to. 
*/ ret = lxc_abstract_unix_send_fd(ipc_sockets[0], labelfd, NULL, 0); if (ret <= 0) { ERROR("Error using IPC to send child LSM fd (4): %s.", strerror(errno)); goto cleanup_error; } } /* now shut down communication with child, we're done */ shutdown(ipc_sockets[0], SHUT_RDWR); close(ipc_sockets[0]); lxc_proc_put_context_info(init_ctx); /* we're done, the child process should now execute whatever * it is that the user requested. The parent can now track it * with waitpid() or similar. */ *attached_process = attached_pid; return 0; cleanup_error: /* first shut down the socket, then wait for the pid, * otherwise the pid we're waiting for may never exit */ if (procfd >= 0) close(procfd); shutdown(ipc_sockets[0], SHUT_RDWR); close(ipc_sockets[0]); if (to_cleanup_pid) (void) wait_for_pid(to_cleanup_pid); lxc_proc_put_context_info(init_ctx); return -1; } /* first subprocess begins here, we close the socket that is for the * initial thread */ close(ipc_sockets[0]); /* Wait for the parent to have setup cgroups */ expected = 0; status = -1; ret = lxc_read_nointr_expect(ipc_sockets[1], &status, sizeof(status), &expected); if (ret <= 0) { ERROR("error communicating with child process"); shutdown(ipc_sockets[1], SHUT_RDWR); rexit(-1); } if ((options->attach_flags & LXC_ATTACH_MOVE_TO_CGROUP) && cgns_supported()) options->namespaces |= CLONE_NEWCGROUP; /* attach now, create another subprocess later, since pid namespaces * only really affect the children of the current process */ ret = lxc_attach_to_ns(init_pid, options->namespaces); if (ret < 0) { ERROR("failed to enter the namespace"); shutdown(ipc_sockets[1], SHUT_RDWR); rexit(-1); } /* attach succeeded, try to cwd */ if (options->initial_cwd) new_cwd = options->initial_cwd; else new_cwd = cwd; ret = chdir(new_cwd); if (ret < 0) WARN("could not change directory to '%s'", new_cwd); free(cwd); /* now create the real child process */ { struct attach_clone_payload payload = { .ipc_socket = ipc_sockets[1], .options = options, .init_ctx = init_ctx, .exec_function = exec_function, .exec_payload = exec_payload, }; /* We use clone_parent here to make this subprocess a direct child of * the initial process. Then this intermediate process can exit and * the parent can directly track the attached process. */ pid = lxc_clone(attach_child_main, &payload, CLONE_PARENT); } /* shouldn't happen, clone() should always return positive pid */ if (pid <= 0) { SYSERROR("failed to create subprocess"); shutdown(ipc_sockets[1], SHUT_RDWR); rexit(-1); } /* tell grandparent the pid of the pid of the newly created child */ ret = lxc_write_nointr(ipc_sockets[1], &pid, sizeof(pid)); if (ret != sizeof(pid)) { /* if this really happens here, this is very unfortunate, since the * parent will not know the pid of the attached process and will * not be able to wait for it (and we won't either due to CLONE_PARENT) * so the parent won't be able to reap it and the attached process * will remain a zombie */ ERROR("error using IPC to notify main process of pid of the attached process"); shutdown(ipc_sockets[1], SHUT_RDWR); rexit(-1); } /* the rest is in the hands of the initial and the attached process */ rexit(0); }
168,772
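The commit message above leans on SCM_RIGHTS fd passing for the LSM label fd. Below is a hedged, stand-alone sketch of that mechanism only — it is not lxc code, and the file opened (/proc/self/comm) is just a stand-in for the /proc/<pid>/attr/* fd the parent actually sends.

/* Sketch of SCM_RIGHTS fd passing over a socketpair, the mechanism the
 * parent uses to hand the attached child its LSM label fd. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>

static int send_fd(int sock, int fd)
{
    char dummy = 'x';
    struct iovec iov = { &dummy, 1 };
    union { char buf[CMSG_SPACE(sizeof(int))]; struct cmsghdr align; } u;
    struct msghdr msg = { 0 };
    struct cmsghdr *cmsg;

    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = u.buf;
    msg.msg_controllen = sizeof(u.buf);
    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;            /* ancillary payload is an fd */
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
    return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}

static int recv_fd(int sock)
{
    char dummy;
    struct iovec iov = { &dummy, 1 };
    union { char buf[CMSG_SPACE(sizeof(int))]; struct cmsghdr align; } u;
    struct msghdr msg = { 0 };
    struct cmsghdr *cmsg;
    int fd = -1;

    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = u.buf;
    msg.msg_controllen = sizeof(u.buf);
    if (recvmsg(sock, &msg, 0) < 0)
        return -1;
    cmsg = CMSG_FIRSTHDR(&msg);
    if (cmsg && cmsg->cmsg_type == SCM_RIGHTS)
        memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
    return fd;                               /* new fd in this process */
}

int main(void)
{
    int sv[2];
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
        return 1;
    if (fork() == 0) {                       /* "attached" side receives */
        char line[64] = { 0 };
        int fd;
        close(sv[0]);
        fd = recv_fd(sv[1]);
        if (fd >= 0 && read(fd, line, sizeof(line) - 1) > 0)
            printf("child received fd, read: %s", line);
        return 0;
    }
    close(sv[1]);                            /* "initial" side opens and sends */
    send_fd(sv[0], open("/proc/self/comm", O_RDONLY));
    wait(NULL);
    return 0;
}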
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void SoftHEVC::setDecodeArgs(ivd_video_decode_ip_t *ps_dec_ip, ivd_video_decode_op_t *ps_dec_op, OMX_BUFFERHEADERTYPE *inHeader, OMX_BUFFERHEADERTYPE *outHeader, size_t timeStampIx) { size_t sizeY = outputBufferWidth() * outputBufferHeight(); size_t sizeUV; uint8_t *pBuf; ps_dec_ip->u4_size = sizeof(ivd_video_decode_ip_t); ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t); ps_dec_ip->e_cmd = IVD_CMD_VIDEO_DECODE; /* When in flush and after EOS with zero byte input, * inHeader is set to zero. Hence check for non-null */ if (inHeader) { ps_dec_ip->u4_ts = timeStampIx; ps_dec_ip->pv_stream_buffer = inHeader->pBuffer + inHeader->nOffset; ps_dec_ip->u4_num_Bytes = inHeader->nFilledLen; } else { ps_dec_ip->u4_ts = 0; ps_dec_ip->pv_stream_buffer = NULL; ps_dec_ip->u4_num_Bytes = 0; } if (outHeader) { pBuf = outHeader->pBuffer; } else { pBuf = mFlushOutBuffer; } sizeUV = sizeY / 4; ps_dec_ip->s_out_buffer.u4_min_out_buf_size[0] = sizeY; ps_dec_ip->s_out_buffer.u4_min_out_buf_size[1] = sizeUV; ps_dec_ip->s_out_buffer.u4_min_out_buf_size[2] = sizeUV; ps_dec_ip->s_out_buffer.pu1_bufs[0] = pBuf; ps_dec_ip->s_out_buffer.pu1_bufs[1] = pBuf + sizeY; ps_dec_ip->s_out_buffer.pu1_bufs[2] = pBuf + sizeY + sizeUV; ps_dec_ip->s_out_buffer.u4_num_bufs = 3; return; } Commit Message: codecs: check OMX buffer size before use in (avc|hevc|mpeg2)dec Bug: 27833616 Change-Id: Ic4045a3f56f53b08d0b1264b2a91b8f43e91b738 (cherry picked from commit 87fdee0bc9e3ac4d2a88ef0a8e150cfdf08c161d) CWE ID: CWE-20
bool SoftHEVC::setDecodeArgs(ivd_video_decode_ip_t *ps_dec_ip, ivd_video_decode_op_t *ps_dec_op, OMX_BUFFERHEADERTYPE *inHeader, OMX_BUFFERHEADERTYPE *outHeader, size_t timeStampIx) { size_t sizeY = outputBufferWidth() * outputBufferHeight(); size_t sizeUV; ps_dec_ip->u4_size = sizeof(ivd_video_decode_ip_t); ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t); ps_dec_ip->e_cmd = IVD_CMD_VIDEO_DECODE; /* When in flush and after EOS with zero byte input, * inHeader is set to zero. Hence check for non-null */ if (inHeader) { ps_dec_ip->u4_ts = timeStampIx; ps_dec_ip->pv_stream_buffer = inHeader->pBuffer + inHeader->nOffset; ps_dec_ip->u4_num_Bytes = inHeader->nFilledLen; } else { ps_dec_ip->u4_ts = 0; ps_dec_ip->pv_stream_buffer = NULL; ps_dec_ip->u4_num_Bytes = 0; } sizeUV = sizeY / 4; ps_dec_ip->s_out_buffer.u4_min_out_buf_size[0] = sizeY; ps_dec_ip->s_out_buffer.u4_min_out_buf_size[1] = sizeUV; ps_dec_ip->s_out_buffer.u4_min_out_buf_size[2] = sizeUV; uint8_t *pBuf; if (outHeader) { if (outHeader->nAllocLen < sizeY + (sizeUV * 2)) { android_errorWriteLog(0x534e4554, "27569635"); return false; } pBuf = outHeader->pBuffer; } else { // mFlushOutBuffer always has the right size. pBuf = mFlushOutBuffer; } ps_dec_ip->s_out_buffer.pu1_bufs[0] = pBuf; ps_dec_ip->s_out_buffer.pu1_bufs[1] = pBuf + sizeY; ps_dec_ip->s_out_buffer.pu1_bufs[2] = pBuf + sizeY + sizeUV; ps_dec_ip->s_out_buffer.u4_num_bufs = 3; return true; }
174,182
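A note on the arithmetic behind the added nAllocLen check: for a planar YUV420 output buffer the three plane pointers span sizeY + 2*sizeUV bytes, so any smaller allocation lets the decoder write past the caller's buffer. The numbers below are hypothetical (the 1920x1088 frame size is an assumption, not taken from this entry):

/* sketch only: plane sizes for a hypothetical 1920x1088 YUV420 frame */
size_t sizeY  = 1920 * 1088;        /* 2,088,960 bytes of luma            */
size_t sizeUV = sizeY / 4;          /*   522,240 bytes per chroma plane   */
size_t needed = sizeY + 2 * sizeUV; /* 3,133,440 bytes the decoder writes */
if (outHeader->nAllocLen < needed)  /* reject before deriving pBuf+sizeY  */
    return false;                   /* ... and pBuf+sizeY+sizeUV          */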
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: AcpiPsCompleteFinalOp ( ACPI_WALK_STATE *WalkState, ACPI_PARSE_OBJECT *Op, ACPI_STATUS Status) { ACPI_STATUS Status2; ACPI_FUNCTION_TRACE_PTR (PsCompleteFinalOp, WalkState); /* * Complete the last Op (if not completed), and clear the scope stack. * It is easily possible to end an AML "package" with an unbounded number * of open scopes (such as when several ASL blocks are closed with * sequential closing braces). We want to terminate each one cleanly. */ ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "AML package complete at Op %p\n", Op)); do { if (Op) { if (WalkState->AscendingCallback != NULL) { WalkState->Op = Op; WalkState->OpInfo = AcpiPsGetOpcodeInfo (Op->Common.AmlOpcode); WalkState->Opcode = Op->Common.AmlOpcode; Status = WalkState->AscendingCallback (WalkState); Status = AcpiPsNextParseState (WalkState, Op, Status); if (Status == AE_CTRL_PENDING) { Status = AcpiPsCompleteOp (WalkState, &Op, AE_OK); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } } if (Status == AE_CTRL_TERMINATE) { Status = AE_OK; /* Clean up */ do { if (Op) { Status2 = AcpiPsCompleteThisOp (WalkState, Op); if (ACPI_FAILURE (Status2)) { return_ACPI_STATUS (Status2); } } AcpiPsPopScope (&(WalkState->ParserState), &Op, &WalkState->ArgTypes, &WalkState->ArgCount); } while (Op); return_ACPI_STATUS (Status); } else if (ACPI_FAILURE (Status)) { /* First error is most important */ (void) AcpiPsCompleteThisOp (WalkState, Op); return_ACPI_STATUS (Status); } } Status2 = AcpiPsCompleteThisOp (WalkState, Op); if (ACPI_FAILURE (Status2)) { return_ACPI_STATUS (Status2); } } AcpiPsPopScope (&(WalkState->ParserState), &Op, &WalkState->ArgTypes, &WalkState->ArgCount); } while (Op); return_ACPI_STATUS (Status); } Commit Message: acpi: acpica: fix acpi parse and parseext cache leaks I'm Seunghun Han, and I work for National Security Research Institute of South Korea. I have been doing a research on ACPI and found an ACPI cache leak in ACPI early abort cases. Boot log of ACPI cache leak is as follows: [ 0.352414] ACPI: Added _OSI(Module Device) [ 0.353182] ACPI: Added _OSI(Processor Device) [ 0.353182] ACPI: Added _OSI(3.0 _SCP Extensions) [ 0.353182] ACPI: Added _OSI(Processor Aggregator Device) [ 0.356028] ACPI: Unable to start the ACPI Interpreter [ 0.356799] ACPI Error: Could not remove SCI handler (20170303/evmisc-281) [ 0.360215] kmem_cache_destroy Acpi-State: Slab cache still has objects [ 0.360648] CPU: 0 PID: 1 Comm: swapper/0 Tainted: G W 4.12.0-rc4-next-20170608+ #10 [ 0.361273] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 [ 0.361873] Call Trace: [ 0.362243] ? dump_stack+0x5c/0x81 [ 0.362591] ? kmem_cache_destroy+0x1aa/0x1c0 [ 0.362944] ? acpi_sleep_proc_init+0x27/0x27 [ 0.363296] ? acpi_os_delete_cache+0xa/0x10 [ 0.363646] ? acpi_ut_delete_caches+0x6d/0x7b [ 0.364000] ? acpi_terminate+0xa/0x14 [ 0.364000] ? acpi_init+0x2af/0x34f [ 0.364000] ? __class_create+0x4c/0x80 [ 0.364000] ? video_setup+0x7f/0x7f [ 0.364000] ? acpi_sleep_proc_init+0x27/0x27 [ 0.364000] ? do_one_initcall+0x4e/0x1a0 [ 0.364000] ? kernel_init_freeable+0x189/0x20a [ 0.364000] ? rest_init+0xc0/0xc0 [ 0.364000] ? kernel_init+0xa/0x100 [ 0.364000] ? ret_from_fork+0x25/0x30 I analyzed this memory leak in detail. I found that “Acpi-State” cache and “Acpi-Parse” cache were merged because the size of cache objects was same slab cache size. I finally found “Acpi-Parse” cache and “Acpi-ParseExt” cache were leaked using SLAB_NEVER_MERGE flag in kmem_cache_create() function. 
Real ACPI cache leak point is as follows: [ 0.360101] ACPI: Added _OSI(Module Device) [ 0.360101] ACPI: Added _OSI(Processor Device) [ 0.360101] ACPI: Added _OSI(3.0 _SCP Extensions) [ 0.361043] ACPI: Added _OSI(Processor Aggregator Device) [ 0.364016] ACPI: Unable to start the ACPI Interpreter [ 0.365061] ACPI Error: Could not remove SCI handler (20170303/evmisc-281) [ 0.368174] kmem_cache_destroy Acpi-Parse: Slab cache still has objects [ 0.369332] CPU: 1 PID: 1 Comm: swapper/0 Tainted: G W 4.12.0-rc4-next-20170608+ #8 [ 0.371256] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 [ 0.372000] Call Trace: [ 0.372000] ? dump_stack+0x5c/0x81 [ 0.372000] ? kmem_cache_destroy+0x1aa/0x1c0 [ 0.372000] ? acpi_sleep_proc_init+0x27/0x27 [ 0.372000] ? acpi_os_delete_cache+0xa/0x10 [ 0.372000] ? acpi_ut_delete_caches+0x56/0x7b [ 0.372000] ? acpi_terminate+0xa/0x14 [ 0.372000] ? acpi_init+0x2af/0x34f [ 0.372000] ? __class_create+0x4c/0x80 [ 0.372000] ? video_setup+0x7f/0x7f [ 0.372000] ? acpi_sleep_proc_init+0x27/0x27 [ 0.372000] ? do_one_initcall+0x4e/0x1a0 [ 0.372000] ? kernel_init_freeable+0x189/0x20a [ 0.372000] ? rest_init+0xc0/0xc0 [ 0.372000] ? kernel_init+0xa/0x100 [ 0.372000] ? ret_from_fork+0x25/0x30 [ 0.388039] kmem_cache_destroy Acpi-ParseExt: Slab cache still has objects [ 0.389063] CPU: 1 PID: 1 Comm: swapper/0 Tainted: G W 4.12.0-rc4-next-20170608+ #8 [ 0.390557] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 [ 0.392000] Call Trace: [ 0.392000] ? dump_stack+0x5c/0x81 [ 0.392000] ? kmem_cache_destroy+0x1aa/0x1c0 [ 0.392000] ? acpi_sleep_proc_init+0x27/0x27 [ 0.392000] ? acpi_os_delete_cache+0xa/0x10 [ 0.392000] ? acpi_ut_delete_caches+0x6d/0x7b [ 0.392000] ? acpi_terminate+0xa/0x14 [ 0.392000] ? acpi_init+0x2af/0x34f [ 0.392000] ? __class_create+0x4c/0x80 [ 0.392000] ? video_setup+0x7f/0x7f [ 0.392000] ? acpi_sleep_proc_init+0x27/0x27 [ 0.392000] ? do_one_initcall+0x4e/0x1a0 [ 0.392000] ? kernel_init_freeable+0x189/0x20a [ 0.392000] ? rest_init+0xc0/0xc0 [ 0.392000] ? kernel_init+0xa/0x100 [ 0.392000] ? ret_from_fork+0x25/0x30 When early abort is occurred due to invalid ACPI information, Linux kernel terminates ACPI by calling acpi_terminate() function. The function calls acpi_ut_delete_caches() function to delete local caches (acpi_gbl_namespace_ cache, state_cache, operand_cache, ps_node_cache, ps_node_ext_cache). But the deletion codes in acpi_ut_delete_caches() function only delete slab caches using kmem_cache_destroy() function, therefore the cache objects should be flushed before acpi_ut_delete_caches() function. “Acpi-Parse” cache and “Acpi-ParseExt” cache are used in an AML parse function, acpi_ps_parse_loop(). The function should have flush codes to handle an error state due to invalid AML codes. This cache leak has a security threat because an old kernel (<= 4.9) shows memory locations of kernel functions in stack dump. Some malicious users could use this information to neutralize kernel ASLR. To fix ACPI cache leak for enhancing security, I made a patch which has flush codes in acpi_ps_parse_loop() function. I hope that this patch improves the security of Linux kernel. Thank you. Signed-off-by: Seunghun Han <[email protected]> CWE ID: CWE-200
AcpiPsCompleteFinalOp ( ACPI_WALK_STATE *WalkState, ACPI_PARSE_OBJECT *Op, ACPI_STATUS Status) { ACPI_STATUS ReturnStatus = AE_OK; BOOLEAN Ascending = TRUE; ACPI_FUNCTION_TRACE_PTR (PsCompleteFinalOp, WalkState); /* * Complete the last Op (if not completed), and clear the scope stack. * It is easily possible to end an AML "package" with an unbounded number * of open scopes (such as when several ASL blocks are closed with * sequential closing braces). We want to terminate each one cleanly. */ ACPI_DEBUG_PRINT ((ACPI_DB_PARSE, "AML package complete at Op %p\n", Op)); do { if (Op) { if (Ascending && WalkState->AscendingCallback != NULL) { WalkState->Op = Op; WalkState->OpInfo = AcpiPsGetOpcodeInfo (Op->Common.AmlOpcode); WalkState->Opcode = Op->Common.AmlOpcode; Status = WalkState->AscendingCallback (WalkState); Status = AcpiPsNextParseState (WalkState, Op, Status); if (Status == AE_CTRL_PENDING) { Status = AcpiPsCompleteOp (WalkState, &Op, AE_OK); if (ACPI_FAILURE (Status)) { return_ACPI_STATUS (Status); } } if (Status == AE_CTRL_TERMINATE) { Ascending = FALSE; ReturnStatus = AE_CTRL_TERMINATE; } else if (ACPI_FAILURE (Status)) { /* First error is most important */ Ascending = FALSE; ReturnStatus = Status; } } Status = AcpiPsCompleteThisOp (WalkState, Op); if (ACPI_FAILURE (Status)) { Ascending = FALSE; if (ACPI_SUCCESS (ReturnStatus) || ReturnStatus == AE_CTRL_TERMINATE) { ReturnStatus = Status; } } } AcpiPsPopScope (&(WalkState->ParserState), &Op, &WalkState->ArgTypes, &WalkState->ArgCount); } while (Op); return_ACPI_STATUS (ReturnStatus); }
167,787
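The repeated "Slab cache still has objects" lines in the log are the kernel reporting exactly this condition: kmem_cache_destroy() was reached while objects allocated from the cache were still outstanding. A minimal sketch of the invariant the fix restores (generic kernel cache API usage, not ACPICA code):

/* every object taken from a kmem_cache must go back before the cache dies */
struct kmem_cache *cache = kmem_cache_create("demo", 64, 0, 0, NULL);
void *obj = kmem_cache_alloc(cache, GFP_KERNEL);
/* ... an early parse abort used to bail out here, leaking obj ... */
kmem_cache_free(cache, obj);   /* the flush step the parse loop was missing */
kmem_cache_destroy(cache);     /* otherwise: "Slab cache still has objects" */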
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: hfs_cat_traverse(HFS_INFO * hfs, TSK_HFS_BTREE_CB a_cb, void *ptr) { TSK_FS_INFO *fs = &(hfs->fs_info); uint32_t cur_node; /* node id of the current node */ char *node; uint16_t nodesize; uint8_t is_done = 0; tsk_error_reset(); nodesize = tsk_getu16(fs->endian, hfs->catalog_header.nodesize); if ((node = (char *) tsk_malloc(nodesize)) == NULL) return 1; /* start at root node */ cur_node = tsk_getu32(fs->endian, hfs->catalog_header.rootNode); /* if the root node is zero, then the extents btree is empty */ /* if no files have overflow extents, the Extents B-tree still exists on disk, but is an empty B-tree containing only the header node */ if (cur_node == 0) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: " "empty extents btree\n"); free(node); return 1; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: starting at " "root node %" PRIu32 "; nodesize = %" PRIu16 "\n", cur_node, nodesize); /* Recurse down to the needed leaf nodes and then go forward */ is_done = 0; while (is_done == 0) { TSK_OFF_T cur_off; /* start address of cur_node */ uint16_t num_rec; /* number of records in this node */ ssize_t cnt; hfs_btree_node *node_desc; if (cur_node > tsk_getu32(fs->endian, hfs->catalog_header.totalNodes)) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: Node %d too large for file", cur_node); free(node); return 1; } cur_off = cur_node * nodesize; cnt = tsk_fs_attr_read(hfs->catalog_attr, cur_off, node, nodesize, 0); if (cnt != nodesize) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("hfs_cat_traverse: Error reading node %d at offset %" PRIuOFF, cur_node, cur_off); free(node); return 1; } if (nodesize < sizeof(hfs_btree_node)) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: Node size %d is too small to be valid", nodesize); free(node); return 1; } node_desc = (hfs_btree_node *) node; num_rec = tsk_getu16(fs->endian, node_desc->num_rec); if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: node %" PRIu32 " @ %" PRIu64 " has %" PRIu16 " records\n", cur_node, cur_off, num_rec); if (num_rec == 0) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("hfs_cat_traverse: zero records in node %" PRIu32, cur_node); free(node); return 1; } /* With an index node, find the record with the largest key that is smaller * to or equal to cnid */ if (node_desc->type == HFS_BT_NODE_TYPE_IDX) { uint32_t next_node = 0; int rec; for (rec = 0; rec < num_rec; ++rec) { size_t rec_off; hfs_btree_key_cat *key; uint8_t retval; uint16_t keylen; rec_off = tsk_getu16(fs->endian, &node[nodesize - (rec + 1) * 2]); if (rec_off > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: offset of record %d in index node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off, nodesize); free(node); return 1; } key = (hfs_btree_key_cat *) & node[rec_off]; keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len); if ((keylen) > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: length of key %d in index node %d too large (%d vs %" PRIu16 ")", rec, cur_node, keylen, nodesize); free(node); return 1; } /* if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: record %" PRIu16 " ; keylen %" PRIu16 " (%" PRIu32 ")\n", rec, tsk_getu16(fs->endian, key->key_len), tsk_getu32(fs->endian, key->parent_cnid)); */ /* save the info from this record unless it is too big */ retval = a_cb(hfs, HFS_BT_NODE_TYPE_IDX, 
key, cur_off + rec_off, ptr); if (retval == HFS_BTREE_CB_ERR) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr2 ("hfs_cat_traverse: Callback returned error"); free(node); return 1; } else if ((retval == HFS_BTREE_CB_IDX_LT) || (next_node == 0)) { hfs_btree_index_record *idx_rec; int keylen = 2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian, key->key_len), &(hfs->catalog_header)); if (rec_off + keylen > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: offset of record and keylength %d in index node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off + keylen, nodesize); free(node); return 1; } idx_rec = (hfs_btree_index_record *) & node[rec_off + keylen]; next_node = tsk_getu32(fs->endian, idx_rec->childNode); } if (retval == HFS_BTREE_CB_IDX_EQGT) { break; } } if (next_node == 0) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: did not find any keys in index node %d", cur_node); is_done = 1; break; } if (next_node == cur_node) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: node %d references itself as next node", cur_node); is_done = 1; break; } cur_node = next_node; } /* With a leaf, we look for the specific record. */ else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) { int rec; for (rec = 0; rec < num_rec; ++rec) { size_t rec_off; hfs_btree_key_cat *key; uint8_t retval; uint16_t keylen; rec_off = tsk_getu16(fs->endian, &node[nodesize - (rec + 1) * 2]); if (rec_off > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: offset of record %d in leaf node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off, nodesize); free(node); return 1; } key = (hfs_btree_key_cat *) & node[rec_off]; keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len); if ((keylen) > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: length of key %d in leaf node %d too large (%d vs %" PRIu16 ")", rec, cur_node, keylen, nodesize); free(node); return 1; } /* if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: record %" PRIu16 "; keylen %" PRIu16 " (%" PRIu32 ")\n", rec, tsk_getu16(fs->endian, key->key_len), tsk_getu32(fs->endian, key->parent_cnid)); */ retval = a_cb(hfs, HFS_BT_NODE_TYPE_LEAF, key, cur_off + rec_off, ptr); if (retval == HFS_BTREE_CB_LEAF_STOP) { is_done = 1; break; } else if (retval == HFS_BTREE_CB_ERR) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr2 ("hfs_cat_traverse: Callback returned error"); free(node); return 1; } } if (is_done == 0) { cur_node = tsk_getu32(fs->endian, node_desc->flink); if (cur_node == 0) { is_done = 1; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: moving forward to next leaf"); } } else { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("hfs_cat_traverse: btree node %" PRIu32 " (%" PRIu64 ") is neither index nor leaf (%" PRIu8 ")", cur_node, cur_off, node_desc->type); free(node); return 1; } } free(node); return 0; } Commit Message: hfs: fix keylen check in hfs_cat_traverse() If key->key_len is 65535, calculating "uint16_t keylen' would cause an overflow: uint16_t keylen; ... 
keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len) so the code bypasses the sanity check "if (keylen > nodesize)" which results in crash later: ./toolfs/fstools/fls -b 512 -f hfs <image> ================================================================= ==16==ERROR: AddressSanitizer: SEGV on unknown address 0x6210000256a4 (pc 0x00000054812b bp 0x7ffca548a8f0 sp 0x7ffca548a480 T0) ==16==The signal is caused by a READ memory access. #0 0x54812a in hfs_dir_open_meta_cb /fuzzing/sleuthkit/tsk/fs/hfs_dent.c:237:20 #1 0x51a96c in hfs_cat_traverse /fuzzing/sleuthkit/tsk/fs/hfs.c:1082:21 #2 0x547785 in hfs_dir_open_meta /fuzzing/sleuthkit/tsk/fs/hfs_dent.c:480:9 #3 0x50f57d in tsk_fs_dir_open_meta /fuzzing/sleuthkit/tsk/fs/fs_dir.c:290:14 #4 0x54af17 in tsk_fs_path2inum /fuzzing/sleuthkit/tsk/fs/ifind_lib.c:237:23 #5 0x522266 in hfs_open /fuzzing/sleuthkit/tsk/fs/hfs.c:6579:9 #6 0x508e89 in main /fuzzing/sleuthkit/tools/fstools/fls.cpp:267:19 #7 0x7f9daf67c2b0 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x202b0) #8 0x41d679 in _start (/fuzzing/sleuthkit/tools/fstools/fls+0x41d679) Make 'keylen' int type to prevent the overflow and fix that. Now, I get proper error message instead of crash: ./toolfs/fstools/fls -b 512 -f hfs <image> General file system error (hfs_cat_traverse: length of key 3 in leaf node 1 too large (65537 vs 4096)) CWE ID: CWE-190
hfs_cat_traverse(HFS_INFO * hfs, TSK_HFS_BTREE_CB a_cb, void *ptr) { TSK_FS_INFO *fs = &(hfs->fs_info); uint32_t cur_node; /* node id of the current node */ char *node; uint16_t nodesize; uint8_t is_done = 0; tsk_error_reset(); nodesize = tsk_getu16(fs->endian, hfs->catalog_header.nodesize); if ((node = (char *) tsk_malloc(nodesize)) == NULL) return 1; /* start at root node */ cur_node = tsk_getu32(fs->endian, hfs->catalog_header.rootNode); /* if the root node is zero, then the extents btree is empty */ /* if no files have overflow extents, the Extents B-tree still exists on disk, but is an empty B-tree containing only the header node */ if (cur_node == 0) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: " "empty extents btree\n"); free(node); return 1; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: starting at " "root node %" PRIu32 "; nodesize = %" PRIu16 "\n", cur_node, nodesize); /* Recurse down to the needed leaf nodes and then go forward */ is_done = 0; while (is_done == 0) { TSK_OFF_T cur_off; /* start address of cur_node */ uint16_t num_rec; /* number of records in this node */ ssize_t cnt; hfs_btree_node *node_desc; if (cur_node > tsk_getu32(fs->endian, hfs->catalog_header.totalNodes)) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: Node %d too large for file", cur_node); free(node); return 1; } cur_off = cur_node * nodesize; cnt = tsk_fs_attr_read(hfs->catalog_attr, cur_off, node, nodesize, 0); if (cnt != nodesize) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("hfs_cat_traverse: Error reading node %d at offset %" PRIuOFF, cur_node, cur_off); free(node); return 1; } if (nodesize < sizeof(hfs_btree_node)) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: Node size %d is too small to be valid", nodesize); free(node); return 1; } node_desc = (hfs_btree_node *) node; num_rec = tsk_getu16(fs->endian, node_desc->num_rec); if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: node %" PRIu32 " @ %" PRIu64 " has %" PRIu16 " records\n", cur_node, cur_off, num_rec); if (num_rec == 0) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("hfs_cat_traverse: zero records in node %" PRIu32, cur_node); free(node); return 1; } /* With an index node, find the record with the largest key that is smaller * to or equal to cnid */ if (node_desc->type == HFS_BT_NODE_TYPE_IDX) { uint32_t next_node = 0; int rec; for (rec = 0; rec < num_rec; ++rec) { size_t rec_off; hfs_btree_key_cat *key; uint8_t retval; int keylen; rec_off = tsk_getu16(fs->endian, &node[nodesize - (rec + 1) * 2]); if (rec_off > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: offset of record %d in index node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off, nodesize); free(node); return 1; } key = (hfs_btree_key_cat *) & node[rec_off]; keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len); if ((keylen) > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: length of key %d in index node %d too large (%d vs %" PRIu16 ")", rec, cur_node, keylen, nodesize); free(node); return 1; } /* if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: record %" PRIu16 " ; keylen %" PRIu16 " (%" PRIu32 ")\n", rec, tsk_getu16(fs->endian, key->key_len), tsk_getu32(fs->endian, key->parent_cnid)); */ /* save the info from this record unless it is too big */ retval = a_cb(hfs, HFS_BT_NODE_TYPE_IDX, key, cur_off 
+ rec_off, ptr); if (retval == HFS_BTREE_CB_ERR) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr2 ("hfs_cat_traverse: Callback returned error"); free(node); return 1; } else if ((retval == HFS_BTREE_CB_IDX_LT) || (next_node == 0)) { hfs_btree_index_record *idx_rec; int keylen = 2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian, key->key_len), &(hfs->catalog_header)); if (rec_off + keylen > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: offset of record and keylength %d in index node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off + keylen, nodesize); free(node); return 1; } idx_rec = (hfs_btree_index_record *) & node[rec_off + keylen]; next_node = tsk_getu32(fs->endian, idx_rec->childNode); } if (retval == HFS_BTREE_CB_IDX_EQGT) { break; } } if (next_node == 0) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: did not find any keys in index node %d", cur_node); is_done = 1; break; } if (next_node == cur_node) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: node %d references itself as next node", cur_node); is_done = 1; break; } cur_node = next_node; } /* With a leaf, we look for the specific record. */ else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) { int rec; for (rec = 0; rec < num_rec; ++rec) { size_t rec_off; hfs_btree_key_cat *key; uint8_t retval; int keylen; rec_off = tsk_getu16(fs->endian, &node[nodesize - (rec + 1) * 2]); if (rec_off > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: offset of record %d in leaf node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off, nodesize); free(node); return 1; } key = (hfs_btree_key_cat *) & node[rec_off]; keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len); if ((keylen) > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: length of key %d in leaf node %d too large (%d vs %" PRIu16 ")", rec, cur_node, keylen, nodesize); free(node); return 1; } /* if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: record %" PRIu16 "; keylen %" PRIu16 " (%" PRIu32 ")\n", rec, tsk_getu16(fs->endian, key->key_len), tsk_getu32(fs->endian, key->parent_cnid)); */ retval = a_cb(hfs, HFS_BT_NODE_TYPE_LEAF, key, cur_off + rec_off, ptr); if (retval == HFS_BTREE_CB_LEAF_STOP) { is_done = 1; break; } else if (retval == HFS_BTREE_CB_ERR) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr2 ("hfs_cat_traverse: Callback returned error"); free(node); return 1; } } if (is_done == 0) { cur_node = tsk_getu32(fs->endian, node_desc->flink); if (cur_node == 0) { is_done = 1; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: moving forward to next leaf"); } } else { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("hfs_cat_traverse: btree node %" PRIu32 " (%" PRIu64 ") is neither index nor leaf (%" PRIu8 ")", cur_node, cur_off, node_desc->type); free(node); return 1; } } free(node); return 0; }
169,482
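The root cause is plain 16-bit wrap-around: with key_len = 65535 the sum 2 + key_len is 65537, which a uint16_t stores as 1, so the keylen > nodesize guard passes. A small stand-alone demonstration, assuming the 4096-byte nodesize mentioned in the commit message:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t key_len  = 65535;          /* attacker-controlled on-disk value */
    uint16_t keylen16 = 2 + key_len;    /* truncates to 1                    */
    int      keylen   = 2 + key_len;    /* stays 65537 once kept in an int   */
    uint16_t nodesize = 4096;

    printf("uint16_t keylen: %u, rejected: %d\n",
           (unsigned)keylen16, keylen16 > nodesize);   /* 1, rejected: 0     */
    printf("int keylen:      %d, rejected: %d\n",
           keylen, keylen > nodesize);                 /* 65537, rejected: 1 */
    return 0;
}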
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: png_have_neon(png_structp png_ptr) { FILE *f = fopen("/proc/cpuinfo", "rb"); if (f != NULL) { /* This is a simple state machine which reads the input byte-by-byte until * it gets a match on the 'neon' feature or reaches the end of the stream. */ static const char ch_feature[] = { 70, 69, 65, 84, 85, 82, 69, 83 }; static const char ch_neon[] = { 78, 69, 79, 78 }; enum { StartLine, Feature, Colon, StartTag, Neon, HaveNeon, SkipTag, SkipLine } state; int counter; for (state=StartLine, counter=0;;) { int ch = fgetc(f); if (ch == EOF) { /* EOF means error or end-of-file, return false; neon at EOF is * assumed to be a mistake. */ fclose(f); return 0; } switch (state) { case StartLine: /* Match spaces at the start of line */ if (ch <= 32) /* skip control characters and space */ break; counter=0; state = Feature; /* FALL THROUGH */ case Feature: /* Match 'FEATURE', ASCII case insensitive. */ if ((ch & ~0x20) == ch_feature[counter]) { if (++counter == (sizeof ch_feature)) state = Colon; break; } /* did not match 'feature' */ state = SkipLine; /* FALL THROUGH */ case SkipLine: skipLine: /* Skip everything until we see linefeed or carriage return */ if (ch != 10 && ch != 13) break; state = StartLine; break; case Colon: /* Match any number of space or tab followed by ':' */ if (ch == 32 || ch == 9) break; if (ch == 58) /* i.e. ':' */ { state = StartTag; break; } /* Either a bad line format or a 'feature' prefix followed by * other characters. */ state = SkipLine; goto skipLine; case StartTag: /* Skip space characters before a tag */ if (ch == 32 || ch == 9) break; state = Neon; counter = 0; /* FALL THROUGH */ case Neon: /* Look for 'neon' tag */ if ((ch & ~0x20) == ch_neon[counter]) { if (++counter == (sizeof ch_neon)) state = HaveNeon; break; } state = SkipTag; /* FALL THROUGH */ case SkipTag: /* Skip non-space characters */ if (ch == 10 || ch == 13) state = StartLine; else if (ch == 32 || ch == 9) state = StartTag; break; case HaveNeon: /* Have seen a 'neon' prefix, but there must be a space or new * line character to terminate it. */ if (ch == 10 || ch == 13 || ch == 32 || ch == 9) { fclose(f); return 1; } state = SkipTag; break; default: png_error(png_ptr, "png_have_neon: internal error (bug)"); } } } else png_warning(png_ptr, "/proc/cpuinfo open failed"); return 0; } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
png_have_neon(png_structp png_ptr) { FILE *f = fopen("/proc/cpuinfo", "rb"); if (f != NULL) { /* This is a simple state machine which reads the input byte-by-byte until * it gets a match on the 'neon' feature or reaches the end of the stream. */ static const char ch_feature[] = { 70, 69, 65, 84, 85, 82, 69, 83 }; static const char ch_neon[] = { 78, 69, 79, 78 }; enum { StartLine, Feature, Colon, StartTag, Neon, HaveNeon, SkipTag, SkipLine } state; int counter; for (state=StartLine, counter=0;;) { int ch = fgetc(f); if (ch == EOF) { /* EOF means error or end-of-file, return false; neon at EOF is * assumed to be a mistake. */ fclose(f); return 0; } switch (state) { case StartLine: /* Match spaces at the start of line */ if (ch <= 32) /* skip control characters and space */ break; counter=0; state = Feature; /* FALL THROUGH */ case Feature: /* Match 'FEATURE', ASCII case insensitive. */ if ((ch & ~0x20) == ch_feature[counter]) { if (++counter == (sizeof ch_feature)) state = Colon; break; } /* did not match 'feature' */ state = SkipLine; /* FALL THROUGH */ case SkipLine: skipLine: /* Skip everything until we see linefeed or carriage return */ if (ch != 10 && ch != 13) break; state = StartLine; break; case Colon: /* Match any number of space or tab followed by ':' */ if (ch == 32 || ch == 9) break; if (ch == 58) /* i.e. ':' */ { state = StartTag; break; } /* Either a bad line format or a 'feature' prefix followed by * other characters. */ state = SkipLine; goto skipLine; case StartTag: /* Skip space characters before a tag */ if (ch == 32 || ch == 9) break; state = Neon; counter = 0; /* FALL THROUGH */ case Neon: /* Look for 'neon' tag */ if ((ch & ~0x20) == ch_neon[counter]) { if (++counter == (sizeof ch_neon)) state = HaveNeon; break; } state = SkipTag; /* FALL THROUGH */ case SkipTag: /* Skip non-space characters */ if (ch == 10 || ch == 13) state = StartLine; else if (ch == 32 || ch == 9) state = StartTag; break; case HaveNeon: /* Have seen a 'neon' prefix, but there must be a space or new * line character to terminate it. */ if (ch == 10 || ch == 13 || ch == 32 || ch == 9) { fclose(f); return 1; } state = SkipTag; break; default: png_error(png_ptr, "png_have_neon: internal error (bug)"); } } } #ifdef PNG_WARNINGS_SUPPORTED else png_warning(png_ptr, "/proc/cpuinfo open failed"); #endif return 0; }
173,565
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: transform_display_init(transform_display *dp, png_modifier *pm, png_uint_32 id, PNG_CONST image_transform *transform_list) { memset(dp, 0, sizeof *dp); /* Standard fields */ standard_display_init(&dp->this, &pm->this, id, 0/*do_interlace*/, pm->use_update_info); /* Parameter fields */ dp->pm = pm; dp->transform_list = transform_list; /* Local variable fields */ dp->output_colour_type = 255; /* invalid */ dp->output_bit_depth = 255; /* invalid */ } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
transform_display_init(transform_display *dp, png_modifier *pm, png_uint_32 id, const image_transform *transform_list) { memset(dp, 0, sizeof *dp); /* Standard fields */ standard_display_init(&dp->this, &pm->this, id, do_read_interlace, pm->use_update_info); /* Parameter fields */ dp->pm = pm; dp->transform_list = transform_list; dp->max_gamma_8 = 16; /* Local variable fields */ dp->output_colour_type = 255; /* invalid */ dp->output_bit_depth = 255; /* invalid */ dp->unpacked = 0; /* not unpacked */ }
173,712
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static UINT dvcman_receive_channel_data(drdynvcPlugin* drdynvc, IWTSVirtualChannelManager* pChannelMgr, UINT32 ChannelId, wStream* data) { UINT status = CHANNEL_RC_OK; DVCMAN_CHANNEL* channel; size_t dataSize = Stream_GetRemainingLength(data); channel = (DVCMAN_CHANNEL*) dvcman_find_channel_by_id(pChannelMgr, ChannelId); if (!channel) { /* Windows 8.1 tries to open channels not created. * Ignore cases like this. */ WLog_Print(drdynvc->log, WLOG_ERROR, "ChannelId %"PRIu32" not found!", ChannelId); return CHANNEL_RC_OK; } if (channel->dvc_data) { /* Fragmented data */ if (Stream_GetPosition(channel->dvc_data) + dataSize > (UINT32) Stream_Capacity( channel->dvc_data)) { WLog_Print(drdynvc->log, WLOG_ERROR, "data exceeding declared length!"); Stream_Release(channel->dvc_data); channel->dvc_data = NULL; return ERROR_INVALID_DATA; } Stream_Write(channel->dvc_data, Stream_Pointer(data), dataSize); if (Stream_GetPosition(channel->dvc_data) >= channel->dvc_data_length) { Stream_SealLength(channel->dvc_data); Stream_SetPosition(channel->dvc_data, 0); status = channel->channel_callback->OnDataReceived(channel->channel_callback, channel->dvc_data); Stream_Release(channel->dvc_data); channel->dvc_data = NULL; } } else { status = channel->channel_callback->OnDataReceived(channel->channel_callback, data); } return status; } Commit Message: Fix for #4866: Added additional length checks CWE ID:
static UINT dvcman_receive_channel_data(drdynvcPlugin* drdynvc, IWTSVirtualChannelManager* pChannelMgr, UINT32 ChannelId, wStream* data) { UINT status = CHANNEL_RC_OK; DVCMAN_CHANNEL* channel; size_t dataSize = Stream_GetRemainingLength(data); channel = (DVCMAN_CHANNEL*) dvcman_find_channel_by_id(pChannelMgr, ChannelId); if (!channel) { /* Windows 8.1 tries to open channels not created. * Ignore cases like this. */ WLog_Print(drdynvc->log, WLOG_ERROR, "ChannelId %"PRIu32" not found!", ChannelId); return CHANNEL_RC_OK; } if (channel->dvc_data) { /* Fragmented data */ if (Stream_GetPosition(channel->dvc_data) + dataSize > Stream_Capacity(channel->dvc_data)) { WLog_Print(drdynvc->log, WLOG_ERROR, "data exceeding declared length!"); Stream_Release(channel->dvc_data); channel->dvc_data = NULL; return ERROR_INVALID_DATA; } Stream_Copy(data, channel->dvc_data, dataSize); if (Stream_GetPosition(channel->dvc_data) >= channel->dvc_data_length) { Stream_SealLength(channel->dvc_data); Stream_SetPosition(channel->dvc_data, 0); status = channel->channel_callback->OnDataReceived(channel->channel_callback, channel->dvc_data); Stream_Release(channel->dvc_data); channel->dvc_data = NULL; } } else { status = channel->channel_callback->OnDataReceived(channel->channel_callback, data); } return status; }
168,940
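The fragmented-data path is what the fix tightens: the running total is bounded by the capacity declared for the reassembly buffer before anything is appended, and the comparison no longer goes through the earlier UINT32 cast. A hedged sketch of the shape of that check, reusing the same FreeRDP stream helpers:

/* sketch: reassembly must never outgrow the capacity announced earlier */
size_t have = Stream_GetPosition(channel->dvc_data);
size_t room = Stream_Capacity(channel->dvc_data);    /* no narrowing cast  */
if (dataSize > room - have)                          /* also overflow-safe */
    return ERROR_INVALID_DATA;
Stream_Copy(data, channel->dvc_data, dataSize);      /* bounded append     */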
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int ssl_scan_serverhello_tlsext(SSL *s, unsigned char **p, unsigned char *d, int n, int *al) { unsigned short length; unsigned short type; unsigned short size; unsigned char *data = *p; int tlsext_servername = 0; int renegotiate_seen = 0; #ifndef OPENSSL_NO_NEXTPROTONEG s->s3->next_proto_neg_seen = 0; #endif if (s->s3->alpn_selected) { OPENSSL_free(s->s3->alpn_selected); s->s3->alpn_selected = NULL; } #ifndef OPENSSL_NO_HEARTBEATS s->tlsext_heartbeat &= ~(SSL_TLSEXT_HB_ENABLED | SSL_TLSEXT_HB_DONT_SEND_REQUESTS); #endif #ifdef TLSEXT_TYPE_encrypt_then_mac s->s3->flags &= ~TLS1_FLAGS_ENCRYPT_THEN_MAC; #endif if (data >= (d+n-2)) goto ri_check; n2s(data,length); if (data+length != d+n) { *al = SSL_AD_DECODE_ERROR; return 0; } while(data <= (d+n-4)) { n2s(data,type); n2s(data,size); if (data+size > (d+n)) goto ri_check; if (s->tlsext_debug_cb) s->tlsext_debug_cb(s, 1, type, data, size, s->tlsext_debug_arg); if (type == TLSEXT_TYPE_server_name) { if (s->tlsext_hostname == NULL || size > 0) { *al = TLS1_AD_UNRECOGNIZED_NAME; return 0; } tlsext_servername = 1; } #ifndef OPENSSL_NO_EC else if (type == TLSEXT_TYPE_ec_point_formats) { unsigned char *sdata = data; int ecpointformatlist_length = *(sdata++); if (ecpointformatlist_length != size - 1) { *al = TLS1_AD_DECODE_ERROR; return 0; } s->session->tlsext_ecpointformatlist_length = 0; if (s->session->tlsext_ecpointformatlist != NULL) OPENSSL_free(s->session->tlsext_ecpointformatlist); if ((s->session->tlsext_ecpointformatlist = OPENSSL_malloc(ecpointformatlist_length)) == NULL) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } s->session->tlsext_ecpointformatlist_length = ecpointformatlist_length; memcpy(s->session->tlsext_ecpointformatlist, sdata, ecpointformatlist_length); #if 0 fprintf(stderr,"ssl_parse_serverhello_tlsext s->session->tlsext_ecpointformatlist "); sdata = s->session->tlsext_ecpointformatlist; #endif } #endif /* OPENSSL_NO_EC */ else if (type == TLSEXT_TYPE_session_ticket) { if (s->tls_session_ticket_ext_cb && !s->tls_session_ticket_ext_cb(s, data, size, s->tls_session_ticket_ext_cb_arg)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } if (!tls_use_ticket(s) || (size > 0)) { *al = TLS1_AD_UNSUPPORTED_EXTENSION; return 0; } s->tlsext_ticket_expected = 1; } #ifdef TLSEXT_TYPE_opaque_prf_input else if (type == TLSEXT_TYPE_opaque_prf_input) { unsigned char *sdata = data; if (size < 2) { *al = SSL_AD_DECODE_ERROR; return 0; } n2s(sdata, s->s3->server_opaque_prf_input_len); if (s->s3->server_opaque_prf_input_len != size - 2) { *al = SSL_AD_DECODE_ERROR; return 0; } if (s->s3->server_opaque_prf_input != NULL) /* shouldn't really happen */ OPENSSL_free(s->s3->server_opaque_prf_input); if (s->s3->server_opaque_prf_input_len == 0) s->s3->server_opaque_prf_input = OPENSSL_malloc(1); /* dummy byte just to get non-NULL */ else s->s3->server_opaque_prf_input = BUF_memdup(sdata, s->s3->server_opaque_prf_input_len); if (s->s3->server_opaque_prf_input == NULL) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } #endif else if (type == TLSEXT_TYPE_status_request) { /* MUST be empty and only sent if we've requested * a status request message. */ if ((s->tlsext_status_type == -1) || (size > 0)) { *al = TLS1_AD_UNSUPPORTED_EXTENSION; return 0; } /* Set flag to expect CertificateStatus message */ s->tlsext_status_expected = 1; } #ifndef OPENSSL_NO_NEXTPROTONEG else if (type == TLSEXT_TYPE_next_proto_neg && s->s3->tmp.finish_md_len == 0) { unsigned char *selected; unsigned char selected_len; /* We must have requested it. 
*/ if (s->ctx->next_proto_select_cb == NULL) { *al = TLS1_AD_UNSUPPORTED_EXTENSION; return 0; } /* The data must be valid */ if (!ssl_next_proto_validate(data, size)) { *al = TLS1_AD_DECODE_ERROR; return 0; } if (s->ctx->next_proto_select_cb(s, &selected, &selected_len, data, size, s->ctx->next_proto_select_cb_arg) != SSL_TLSEXT_ERR_OK) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } s->next_proto_negotiated = OPENSSL_malloc(selected_len); if (!s->next_proto_negotiated) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } memcpy(s->next_proto_negotiated, selected, selected_len); s->next_proto_negotiated_len = selected_len; s->s3->next_proto_neg_seen = 1; } #endif else if (type == TLSEXT_TYPE_application_layer_protocol_negotiation) { unsigned len; /* We must have requested it. */ if (s->alpn_client_proto_list == NULL) { *al = TLS1_AD_UNSUPPORTED_EXTENSION; return 0; } if (size < 4) { *al = TLS1_AD_DECODE_ERROR; return 0; } /* The extension data consists of: * uint16 list_length * uint8 proto_length; * uint8 proto[proto_length]; */ len = data[0]; len <<= 8; len |= data[1]; if (len != (unsigned) size - 2) { *al = TLS1_AD_DECODE_ERROR; return 0; } len = data[2]; if (len != (unsigned) size - 3) { *al = TLS1_AD_DECODE_ERROR; return 0; } if (s->s3->alpn_selected) OPENSSL_free(s->s3->alpn_selected); s->s3->alpn_selected = OPENSSL_malloc(len); if (!s->s3->alpn_selected) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } memcpy(s->s3->alpn_selected, data + 3, len); s->s3->alpn_selected_len = len; } else if (type == TLSEXT_TYPE_renegotiate) { if(!ssl_parse_serverhello_renegotiate_ext(s, data, size, al)) return 0; renegotiate_seen = 1; } #ifndef OPENSSL_NO_HEARTBEATS else if (type == TLSEXT_TYPE_heartbeat) { switch(data[0]) { case 0x01: /* Server allows us to send HB requests */ s->tlsext_heartbeat |= SSL_TLSEXT_HB_ENABLED; break; case 0x02: /* Server doesn't accept HB requests */ s->tlsext_heartbeat |= SSL_TLSEXT_HB_ENABLED; s->tlsext_heartbeat |= SSL_TLSEXT_HB_DONT_SEND_REQUESTS; break; default: *al = SSL_AD_ILLEGAL_PARAMETER; return 0; } } #endif else if (type == TLSEXT_TYPE_use_srtp) { if(ssl_parse_serverhello_use_srtp_ext(s, data, size, al)) return 0; } /* If this extension type was not otherwise handled, but * matches a custom_cli_ext_record, then send it to the c * callback */ else if (s->ctx->custom_cli_ext_records_count) { size_t i; custom_cli_ext_record* record; for (i = 0; i < s->ctx->custom_cli_ext_records_count; i++) { record = &s->ctx->custom_cli_ext_records[i]; if (record->ext_type == type) { if (record->fn2 && !record->fn2(s, type, data, size, al, record->arg)) return 0; break; } } } #ifdef TLSEXT_TYPE_encrypt_then_mac else if (type == TLSEXT_TYPE_encrypt_then_mac) { /* Ignore if inappropriate ciphersuite */ if (s->s3->tmp.new_cipher->algorithm_mac != SSL_AEAD) s->s3->flags |= TLS1_FLAGS_ENCRYPT_THEN_MAC; } #endif data += size; } if (data != d+n) { *al = SSL_AD_DECODE_ERROR; return 0; } if (!s->hit && tlsext_servername == 1) { if (s->tlsext_hostname) { if (s->session->tlsext_hostname == NULL) { s->session->tlsext_hostname = BUF_strdup(s->tlsext_hostname); if (!s->session->tlsext_hostname) { *al = SSL_AD_UNRECOGNIZED_NAME; return 0; } } else { *al = SSL_AD_DECODE_ERROR; return 0; } } } *p = data; ri_check: /* Determine if we need to see RI. Strictly speaking if we want to * avoid an attack we should *always* see RI even on initial server * hello because the client doesn't see any renegotiation during an * attack. 
However this would mean we could not connect to any server * which doesn't support RI so for the immediate future tolerate RI * absence on initial connect only. */ if (!renegotiate_seen && !(s->options & SSL_OP_LEGACY_SERVER_CONNECT) && !(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) { *al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL_SCAN_SERVERHELLO_TLSEXT, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED); return 0; } return 1; } Commit Message: CWE ID: CWE-362
static int ssl_scan_serverhello_tlsext(SSL *s, unsigned char **p, unsigned char *d, int n, int *al) { unsigned short length; unsigned short type; unsigned short size; unsigned char *data = *p; int tlsext_servername = 0; int renegotiate_seen = 0; #ifndef OPENSSL_NO_NEXTPROTONEG s->s3->next_proto_neg_seen = 0; #endif if (s->s3->alpn_selected) { OPENSSL_free(s->s3->alpn_selected); s->s3->alpn_selected = NULL; } #ifndef OPENSSL_NO_HEARTBEATS s->tlsext_heartbeat &= ~(SSL_TLSEXT_HB_ENABLED | SSL_TLSEXT_HB_DONT_SEND_REQUESTS); #endif #ifdef TLSEXT_TYPE_encrypt_then_mac s->s3->flags &= ~TLS1_FLAGS_ENCRYPT_THEN_MAC; #endif if (data >= (d+n-2)) goto ri_check; n2s(data,length); if (data+length != d+n) { *al = SSL_AD_DECODE_ERROR; return 0; } while(data <= (d+n-4)) { n2s(data,type); n2s(data,size); if (data+size > (d+n)) goto ri_check; if (s->tlsext_debug_cb) s->tlsext_debug_cb(s, 1, type, data, size, s->tlsext_debug_arg); if (type == TLSEXT_TYPE_server_name) { if (s->tlsext_hostname == NULL || size > 0) { *al = TLS1_AD_UNRECOGNIZED_NAME; return 0; } tlsext_servername = 1; } #ifndef OPENSSL_NO_EC else if (type == TLSEXT_TYPE_ec_point_formats) { unsigned char *sdata = data; int ecpointformatlist_length = *(sdata++); if (ecpointformatlist_length != size - 1) { *al = TLS1_AD_DECODE_ERROR; return 0; } if (!s->hit) { s->session->tlsext_ecpointformatlist_length = 0; if (s->session->tlsext_ecpointformatlist != NULL) OPENSSL_free(s->session->tlsext_ecpointformatlist); if ((s->session->tlsext_ecpointformatlist = OPENSSL_malloc(ecpointformatlist_length)) == NULL) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } s->session->tlsext_ecpointformatlist_length = ecpointformatlist_length; memcpy(s->session->tlsext_ecpointformatlist, sdata, ecpointformatlist_length); } #if 0 fprintf(stderr,"ssl_parse_serverhello_tlsext s->session->tlsext_ecpointformatlist "); sdata = s->session->tlsext_ecpointformatlist; #endif } #endif /* OPENSSL_NO_EC */ else if (type == TLSEXT_TYPE_session_ticket) { if (s->tls_session_ticket_ext_cb && !s->tls_session_ticket_ext_cb(s, data, size, s->tls_session_ticket_ext_cb_arg)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } if (!tls_use_ticket(s) || (size > 0)) { *al = TLS1_AD_UNSUPPORTED_EXTENSION; return 0; } s->tlsext_ticket_expected = 1; } #ifdef TLSEXT_TYPE_opaque_prf_input else if (type == TLSEXT_TYPE_opaque_prf_input) { unsigned char *sdata = data; if (size < 2) { *al = SSL_AD_DECODE_ERROR; return 0; } n2s(sdata, s->s3->server_opaque_prf_input_len); if (s->s3->server_opaque_prf_input_len != size - 2) { *al = SSL_AD_DECODE_ERROR; return 0; } if (s->s3->server_opaque_prf_input != NULL) /* shouldn't really happen */ OPENSSL_free(s->s3->server_opaque_prf_input); if (s->s3->server_opaque_prf_input_len == 0) s->s3->server_opaque_prf_input = OPENSSL_malloc(1); /* dummy byte just to get non-NULL */ else s->s3->server_opaque_prf_input = BUF_memdup(sdata, s->s3->server_opaque_prf_input_len); if (s->s3->server_opaque_prf_input == NULL) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } #endif else if (type == TLSEXT_TYPE_status_request) { /* MUST be empty and only sent if we've requested * a status request message. */ if ((s->tlsext_status_type == -1) || (size > 0)) { *al = TLS1_AD_UNSUPPORTED_EXTENSION; return 0; } /* Set flag to expect CertificateStatus message */ s->tlsext_status_expected = 1; } #ifndef OPENSSL_NO_NEXTPROTONEG else if (type == TLSEXT_TYPE_next_proto_neg && s->s3->tmp.finish_md_len == 0) { unsigned char *selected; unsigned char selected_len; /* We must have requested it. 
*/ if (s->ctx->next_proto_select_cb == NULL) { *al = TLS1_AD_UNSUPPORTED_EXTENSION; return 0; } /* The data must be valid */ if (!ssl_next_proto_validate(data, size)) { *al = TLS1_AD_DECODE_ERROR; return 0; } if (s->ctx->next_proto_select_cb(s, &selected, &selected_len, data, size, s->ctx->next_proto_select_cb_arg) != SSL_TLSEXT_ERR_OK) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } s->next_proto_negotiated = OPENSSL_malloc(selected_len); if (!s->next_proto_negotiated) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } memcpy(s->next_proto_negotiated, selected, selected_len); s->next_proto_negotiated_len = selected_len; s->s3->next_proto_neg_seen = 1; } #endif else if (type == TLSEXT_TYPE_application_layer_protocol_negotiation) { unsigned len; /* We must have requested it. */ if (s->alpn_client_proto_list == NULL) { *al = TLS1_AD_UNSUPPORTED_EXTENSION; return 0; } if (size < 4) { *al = TLS1_AD_DECODE_ERROR; return 0; } /* The extension data consists of: * uint16 list_length * uint8 proto_length; * uint8 proto[proto_length]; */ len = data[0]; len <<= 8; len |= data[1]; if (len != (unsigned) size - 2) { *al = TLS1_AD_DECODE_ERROR; return 0; } len = data[2]; if (len != (unsigned) size - 3) { *al = TLS1_AD_DECODE_ERROR; return 0; } if (s->s3->alpn_selected) OPENSSL_free(s->s3->alpn_selected); s->s3->alpn_selected = OPENSSL_malloc(len); if (!s->s3->alpn_selected) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } memcpy(s->s3->alpn_selected, data + 3, len); s->s3->alpn_selected_len = len; } else if (type == TLSEXT_TYPE_renegotiate) { if(!ssl_parse_serverhello_renegotiate_ext(s, data, size, al)) return 0; renegotiate_seen = 1; } #ifndef OPENSSL_NO_HEARTBEATS else if (type == TLSEXT_TYPE_heartbeat) { switch(data[0]) { case 0x01: /* Server allows us to send HB requests */ s->tlsext_heartbeat |= SSL_TLSEXT_HB_ENABLED; break; case 0x02: /* Server doesn't accept HB requests */ s->tlsext_heartbeat |= SSL_TLSEXT_HB_ENABLED; s->tlsext_heartbeat |= SSL_TLSEXT_HB_DONT_SEND_REQUESTS; break; default: *al = SSL_AD_ILLEGAL_PARAMETER; return 0; } } #endif else if (type == TLSEXT_TYPE_use_srtp) { if(ssl_parse_serverhello_use_srtp_ext(s, data, size, al)) return 0; } /* If this extension type was not otherwise handled, but * matches a custom_cli_ext_record, then send it to the c * callback */ else if (s->ctx->custom_cli_ext_records_count) { size_t i; custom_cli_ext_record* record; for (i = 0; i < s->ctx->custom_cli_ext_records_count; i++) { record = &s->ctx->custom_cli_ext_records[i]; if (record->ext_type == type) { if (record->fn2 && !record->fn2(s, type, data, size, al, record->arg)) return 0; break; } } } #ifdef TLSEXT_TYPE_encrypt_then_mac else if (type == TLSEXT_TYPE_encrypt_then_mac) { /* Ignore if inappropriate ciphersuite */ if (s->s3->tmp.new_cipher->algorithm_mac != SSL_AEAD) s->s3->flags |= TLS1_FLAGS_ENCRYPT_THEN_MAC; } #endif data += size; } if (data != d+n) { *al = SSL_AD_DECODE_ERROR; return 0; } if (!s->hit && tlsext_servername == 1) { if (s->tlsext_hostname) { if (s->session->tlsext_hostname == NULL) { s->session->tlsext_hostname = BUF_strdup(s->tlsext_hostname); if (!s->session->tlsext_hostname) { *al = SSL_AD_UNRECOGNIZED_NAME; return 0; } } else { *al = SSL_AD_DECODE_ERROR; return 0; } } } *p = data; ri_check: /* Determine if we need to see RI. Strictly speaking if we want to * avoid an attack we should *always* see RI even on initial server * hello because the client doesn't see any renegotiation during an * attack. 
However this would mean we could not connect to any server * which doesn't support RI so for the immediate future tolerate RI * absence on initial connect only. */ if (!renegotiate_seen && !(s->options & SSL_OP_LEGACY_SERVER_CONNECT) && !(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) { *al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL_SCAN_SERVERHELLO_TLSEXT, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED); return 0; } return 1; }
165,175
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void BluetoothDeviceChromeOS::OnRegisterAgent( const base::Closure& callback, const ConnectErrorCallback& error_callback) { VLOG(1) << object_path_.value() << ": Agent registered, now pairing"; DBusThreadManager::Get()->GetBluetoothDeviceClient()-> Pair(object_path_, base::Bind(&BluetoothDeviceChromeOS::OnPair, weak_ptr_factory_.GetWeakPtr(), callback, error_callback), base::Bind(&BluetoothDeviceChromeOS::OnPairError, weak_ptr_factory_.GetWeakPtr(), error_callback)); } Commit Message: Refactor to support default Bluetooth pairing delegate In order to support a default pairing delegate we need to move the agent service provider delegate implementation from BluetoothDevice to BluetoothAdapter while retaining the existing API. BUG=338492 TEST=device_unittests, unit_tests, browser_tests Review URL: https://codereview.chromium.org/148293003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@252216 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
void BluetoothDeviceChromeOS::OnRegisterAgent(
171,229
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, infop, int, options, struct rusage __user *, ru) { struct rusage r; struct waitid_info info = {.status = 0}; long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL); int signo = 0; if (err > 0) { signo = SIGCHLD; err = 0; } if (!err) { if (ru && copy_to_user(ru, &r, sizeof(struct rusage))) return -EFAULT; } if (!infop) return err; user_access_begin(); unsafe_put_user(signo, &infop->si_signo, Efault); unsafe_put_user(0, &infop->si_errno, Efault); unsafe_put_user(info.cause, &infop->si_code, Efault); unsafe_put_user(info.pid, &infop->si_pid, Efault); unsafe_put_user(info.uid, &infop->si_uid, Efault); unsafe_put_user(info.status, &infop->si_status, Efault); user_access_end(); return err; Efault: user_access_end(); return -EFAULT; } Commit Message: fix infoleak in waitid(2) kernel_waitid() can return a PID, an error or 0. rusage is filled in the first case and waitid(2) rusage should've been copied out exactly in that case, *not* whenever kernel_waitid() has not returned an error. Compat variant shares that braino; none of kernel_wait4() callers do, so the below ought to fix it. Reported-and-tested-by: Alexander Potapenko <[email protected]> Fixes: ce72a16fa705 ("wait4(2)/waitid(2): separate copying rusage to userland") Cc: [email protected] # v4.13 Signed-off-by: Al Viro <[email protected]> CWE ID: CWE-200
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, infop, int, options, struct rusage __user *, ru) { struct rusage r; struct waitid_info info = {.status = 0}; long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL); int signo = 0; if (err > 0) { signo = SIGCHLD; err = 0; if (ru && copy_to_user(ru, &r, sizeof(struct rusage))) return -EFAULT; } if (!infop) return err; user_access_begin(); unsafe_put_user(signo, &infop->si_signo, Efault); unsafe_put_user(0, &infop->si_errno, Efault); unsafe_put_user(info.cause, &infop->si_code, Efault); unsafe_put_user(info.pid, &infop->si_pid, Efault); unsafe_put_user(info.uid, &infop->si_uid, Efault); unsafe_put_user(info.status, &infop->si_status, Efault); user_access_end(); return err; Efault: user_access_end(); return -EFAULT; }
167,743
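The waitid(2) row above hinges on one guard: kernel_waitid() fills the rusage buffer only when it returns a PID (a value greater than zero), so the copy to user space must happen only in that branch, never merely because no error occurred. Below is a minimal, stand-alone C sketch of that guard pattern; fake_waitid, struct usage and the wrapper are invented names for illustration and are not kernel code.

/*
 * Sketch of the "copy out only when the call actually produced a result"
 * pattern from the commit message above. Illustrative names only.
 */
#include <stdio.h>
#include <string.h>

struct usage { long user_time; long sys_time; };

/* Pretend syscall core: returns a pid (>0) and fills *u, returns 0 with *u
 * left untouched, or returns a negative error code. */
static long fake_waitid(int which, struct usage *u)
{
    if (which == 1) { u->user_time = 10; u->sys_time = 2; return 1234; }
    if (which == 0) return 0;          /* nothing exited, *u is stale */
    return -22;                        /* error */
}

static int wrapper(int which, struct usage *out)
{
    struct usage tmp;
    memset(&tmp, 0xAA, sizeof(tmp));   /* simulate uninitialized stack data */
    long err = fake_waitid(which, &tmp);

    if (err > 0) {                     /* copy out ONLY when a pid came back */
        *out = tmp;
        return 0;
    }
    return (int)err;                   /* 0 or negative: leave *out untouched */
}

int main(void)
{
    struct usage u = {0};
    int rc = wrapper(1, &u);
    printf("which=1 -> %d (user=%ld)\n", rc, u.user_time);
    rc = wrapper(0, &u);
    printf("which=0 -> %d (user=%ld, unchanged)\n", rc, u.user_time);
    return 0;
}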
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: rdpdr_process(STREAM s) { uint32 handle; uint16 vmin; uint16 component; uint16 pakid; logger(Protocol, Debug, "rdpdr_process()"); /* hexdump(s->p, s->end - s->p); */ in_uint16(s, component); in_uint16(s, pakid); if (component == RDPDR_CTYP_CORE) { switch (pakid) { case PAKID_CORE_DEVICE_IOREQUEST: rdpdr_process_irp(s); break; case PAKID_CORE_SERVER_ANNOUNCE: /* DR_CORE_SERVER_ANNOUNCE_REQ */ in_uint8s(s, 2); /* skip versionMajor */ in_uint16_le(s, vmin); /* VersionMinor */ in_uint32_le(s, g_client_id); /* ClientID */ /* The RDP client is responsibility to provide a random client id if server version is < 12 */ if (vmin < 0x000c) g_client_id = 0x815ed39d; /* IP address (use 127.0.0.1) 0x815ed39d */ g_epoch++; #if WITH_SCARD /* * We need to release all SCARD contexts to end all * current transactions and pending calls */ scard_release_all_contexts(); /* * According to [MS-RDPEFS] 3.2.5.1.2: * * If this packet appears after a sequence of other packets, * it is a signal that the server has reconnected to a new session * and the whole sequence has been reset. The client MUST treat * this packet as the beginning of a new sequence. * The client MUST also cancel all outstanding requests and release * previous references to all devices. * * If any problem arises in the future, please, pay attention to the * "If this packet appears after a sequence of other packets" part * */ #endif rdpdr_send_client_announce_reply(); rdpdr_send_client_name_request(); break; case PAKID_CORE_CLIENTID_CONFIRM: rdpdr_send_client_device_list_announce(); break; case PAKID_CORE_DEVICE_REPLY: in_uint32(s, handle); logger(Protocol, Debug, "rdpdr_process(), server connected to resource %d", handle); break; case PAKID_CORE_SERVER_CAPABILITY: rdpdr_send_client_capability_response(); break; default: logger(Protocol, Debug, "rdpdr_process(), pakid 0x%x of component 0x%x", pakid, component); break; } } else if (component == RDPDR_CTYP_PRN) { if (pakid == PAKID_PRN_CACHE_DATA) printercache_process(s); } else logger(Protocol, Warning, "rdpdr_process(), unhandled component 0x%x", component); } Commit Message: Malicious RDP server security fixes This commit includes fixes for a set of 21 vulnerabilities in rdesktop when a malicious RDP server is used. All vulnerabilities was identified and reported by Eyal Itkin. 
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182
CWE ID: CWE-119
rdpdr_process(STREAM s) { uint32 handle; uint16 vmin; uint16 component; uint16 pakid; struct stream packet = *s; logger(Protocol, Debug, "rdpdr_process()"); /* hexdump(s->p, s->end - s->p); */ in_uint16(s, component); in_uint16(s, pakid); if (component == RDPDR_CTYP_CORE) { switch (pakid) { case PAKID_CORE_DEVICE_IOREQUEST: rdpdr_process_irp(s); break; case PAKID_CORE_SERVER_ANNOUNCE: /* DR_CORE_SERVER_ANNOUNCE_REQ */ in_uint8s(s, 2); /* skip versionMajor */ in_uint16_le(s, vmin); /* VersionMinor */ in_uint32_le(s, g_client_id); /* ClientID */ /* g_client_id is sent back to server, so lets check that we actually got valid data from stream to prevent that we leak back data to server */ if (!s_check(s)) { rdp_protocol_error("rdpdr_process(), consume of g_client_id from stream did overrun", &packet); } /* The RDP client is responsibility to provide a random client id if server version is < 12 */ if (vmin < 0x000c) g_client_id = 0x815ed39d; /* IP address (use 127.0.0.1) 0x815ed39d */ g_epoch++; #if WITH_SCARD /* * We need to release all SCARD contexts to end all * current transactions and pending calls */ scard_release_all_contexts(); /* * According to [MS-RDPEFS] 3.2.5.1.2: * * If this packet appears after a sequence of other packets, * it is a signal that the server has reconnected to a new session * and the whole sequence has been reset. The client MUST treat * this packet as the beginning of a new sequence. * The client MUST also cancel all outstanding requests and release * previous references to all devices. * * If any problem arises in the future, please, pay attention to the * "If this packet appears after a sequence of other packets" part * */ #endif rdpdr_send_client_announce_reply(); rdpdr_send_client_name_request(); break; case PAKID_CORE_CLIENTID_CONFIRM: rdpdr_send_client_device_list_announce(); break; case PAKID_CORE_DEVICE_REPLY: in_uint32(s, handle); logger(Protocol, Debug, "rdpdr_process(), server connected to resource %d", handle); break; case PAKID_CORE_SERVER_CAPABILITY: rdpdr_send_client_capability_response(); break; default: logger(Protocol, Debug, "rdpdr_process(), pakid 0x%x of component 0x%x", pakid, component); break; } } else if (component == RDPDR_CTYP_PRN) { if (pakid == PAKID_PRN_CACHE_DATA) printercache_process(s); } else logger(Protocol, Warning, "rdpdr_process(), unhandled component 0x%x", component); }
169,805
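The rdpdr_process fix above adds an s_check(s) call after consuming g_client_id, so a short packet can no longer leave stale or out-of-bounds bytes to be echoed back to the server. The small self-contained C sketch below shows the same length-checked read idea in generic form; struct stream and in_uint32_le here are simplified stand-ins, not the rdesktop STREAM implementation.

/*
 * Bounded-stream read: every read from an untrusted packet checks the
 * remaining length first and refuses instead of overrunning. Illustrative
 * only; not rdesktop code.
 */
#include <stdio.h>
#include <stdint.h>

struct stream { const uint8_t *p, *end; };

static int in_uint32_le(struct stream *s, uint32_t *out)
{
    if ((size_t)(s->end - s->p) < 4)
        return -1;                         /* short packet: refuse the read */
    *out = (uint32_t)s->p[0] | ((uint32_t)s->p[1] << 8) |
           ((uint32_t)s->p[2] << 16) | ((uint32_t)s->p[3] << 24);
    s->p += 4;
    return 0;
}

int main(void)
{
    const uint8_t pkt[] = { 0x9d, 0xd3, 0x5e, 0x81, 0xff };
    struct stream s = { pkt, pkt + sizeof(pkt) };
    uint32_t v;

    printf("first read:  %s\n", in_uint32_le(&s, &v) == 0 ? "ok" : "short");
    printf("second read: %s\n", in_uint32_le(&s, &v) == 0 ? "ok" : "short");
    return 0;
}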
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void BrowserWindowGtk::UpdateDevToolsForContents(WebContents* contents) { TRACE_EVENT0("ui::gtk", "BrowserWindowGtk::UpdateDevToolsForContents"); DevToolsWindow* new_devtools_window = contents ? DevToolsWindow::GetDockedInstanceForInspectedTab(contents) : NULL; if (devtools_window_ == new_devtools_window && (!new_devtools_window || new_devtools_window->dock_side() == devtools_dock_side_)) return; if (devtools_window_ != new_devtools_window) { if (devtools_window_) devtools_container_->DetachTab(devtools_window_->tab_contents()); devtools_container_->SetTab( new_devtools_window ? new_devtools_window->tab_contents() : NULL); if (new_devtools_window) { new_devtools_window->tab_contents()->web_contents()->WasShown(); } } if (devtools_window_) { GtkAllocation contents_rect; gtk_widget_get_allocation(contents_vsplit_, &contents_rect); if (devtools_dock_side_ == DEVTOOLS_DOCK_SIDE_RIGHT) { devtools_window_->SetWidth( contents_rect.width - gtk_paned_get_position(GTK_PANED(contents_hsplit_))); } else { devtools_window_->SetHeight( contents_rect.height - gtk_paned_get_position(GTK_PANED(contents_vsplit_))); } } bool should_hide = devtools_window_ && (!new_devtools_window || devtools_dock_side_ != new_devtools_window->dock_side()); bool should_show = new_devtools_window && (!devtools_window_ || should_hide); if (should_hide) HideDevToolsContainer(); devtools_window_ = new_devtools_window; if (should_show) { devtools_dock_side_ = new_devtools_window->dock_side(); ShowDevToolsContainer(); } else if (new_devtools_window) { UpdateDevToolsSplitPosition(); } } Commit Message: Remove TabContents from TabStripModelObserver::TabDetachedAt. BUG=107201 TEST=no visible change Review URL: https://chromiumcodereview.appspot.com/11293205 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-20
void BrowserWindowGtk::UpdateDevToolsForContents(WebContents* contents) { TRACE_EVENT0("ui::gtk", "BrowserWindowGtk::UpdateDevToolsForContents"); DevToolsWindow* new_devtools_window = contents ? DevToolsWindow::GetDockedInstanceForInspectedTab(contents) : NULL; if (devtools_window_ == new_devtools_window && (!new_devtools_window || new_devtools_window->dock_side() == devtools_dock_side_)) return; if (devtools_window_ != new_devtools_window) { if (devtools_window_) { devtools_container_->DetachTab( devtools_window_->tab_contents()->web_contents()); } devtools_container_->SetTab( new_devtools_window ? new_devtools_window->tab_contents() : NULL); if (new_devtools_window) { new_devtools_window->tab_contents()->web_contents()->WasShown(); } } if (devtools_window_) { GtkAllocation contents_rect; gtk_widget_get_allocation(contents_vsplit_, &contents_rect); if (devtools_dock_side_ == DEVTOOLS_DOCK_SIDE_RIGHT) { devtools_window_->SetWidth( contents_rect.width - gtk_paned_get_position(GTK_PANED(contents_hsplit_))); } else { devtools_window_->SetHeight( contents_rect.height - gtk_paned_get_position(GTK_PANED(contents_vsplit_))); } } bool should_hide = devtools_window_ && (!new_devtools_window || devtools_dock_side_ != new_devtools_window->dock_side()); bool should_show = new_devtools_window && (!devtools_window_ || should_hide); if (should_hide) HideDevToolsContainer(); devtools_window_ = new_devtools_window; if (should_show) { devtools_dock_side_ = new_devtools_window->dock_side(); ShowDevToolsContainer(); } else if (new_devtools_window) { UpdateDevToolsSplitPosition(); } }
171,514
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void TextTrackCue::setEndTime(double value) { if (end_time_ == value || value < 0) return; CueWillChange(); end_time_ = value; CueDidChange(kCueMutationAffectsOrder); } Commit Message: Support negative timestamps of TextTrackCue Ensure proper behaviour for negative timestamps of TextTrackCue. 1. Cues with negative startTime should become active from 0s. 2. Cues with negative startTime and endTime should never be active. Bug: 314032 Change-Id: Ib53710e58be0be770c933ea8c3c4709a0e5dec0d Reviewed-on: https://chromium-review.googlesource.com/863270 Commit-Queue: srirama chandra sekhar <[email protected]> Reviewed-by: Fredrik Söderquist <[email protected]> Cr-Commit-Position: refs/heads/master@{#529012} CWE ID:
void TextTrackCue::setEndTime(double value) { if (end_time_ == value) return; CueWillChange(); end_time_ = value; CueDidChange(kCueMutationAffectsOrder); }
171,769
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool DeserializeNotificationDatabaseData(const std::string& input, NotificationDatabaseData* output) { DCHECK(output); NotificationDatabaseDataProto message; if (!message.ParseFromString(input)) return false; output->notification_id = message.notification_id(); output->origin = GURL(message.origin()); output->service_worker_registration_id = message.service_worker_registration_id(); PlatformNotificationData* notification_data = &output->notification_data; const NotificationDatabaseDataProto::NotificationData& payload = message.notification_data(); notification_data->title = base::UTF8ToUTF16(payload.title()); switch (payload.direction()) { case NotificationDatabaseDataProto::NotificationData::LEFT_TO_RIGHT: notification_data->direction = PlatformNotificationData::DIRECTION_LEFT_TO_RIGHT; break; case NotificationDatabaseDataProto::NotificationData::RIGHT_TO_LEFT: notification_data->direction = PlatformNotificationData::DIRECTION_RIGHT_TO_LEFT; break; case NotificationDatabaseDataProto::NotificationData::AUTO: notification_data->direction = PlatformNotificationData::DIRECTION_AUTO; break; } notification_data->lang = payload.lang(); notification_data->body = base::UTF8ToUTF16(payload.body()); notification_data->tag = payload.tag(); notification_data->icon = GURL(payload.icon()); if (payload.vibration_pattern().size() > 0) { notification_data->vibration_pattern.assign( payload.vibration_pattern().begin(), payload.vibration_pattern().end()); } notification_data->timestamp = base::Time::FromInternalValue(payload.timestamp()); notification_data->silent = payload.silent(); notification_data->require_interaction = payload.require_interaction(); if (payload.data().length()) { notification_data->data.assign(payload.data().begin(), payload.data().end()); } for (const auto& payload_action : payload.actions()) { PlatformNotificationAction action; action.action = payload_action.action(); action.title = base::UTF8ToUTF16(payload_action.title()); notification_data->actions.push_back(action); } return true; } Commit Message: Notification actions may have an icon url. This is behind a runtime flag for two reasons: * The implementation is incomplete. * We're still evaluating the API design. Intent to Implement and Ship: Notification Action Icons https://groups.google.com/a/chromium.org/d/msg/blink-dev/IM0HxOP7HOA/y8tu6iq1CgAJ BUG=581336 Review URL: https://codereview.chromium.org/1644573002 Cr-Commit-Position: refs/heads/master@{#374649} CWE ID:
bool DeserializeNotificationDatabaseData(const std::string& input, NotificationDatabaseData* output) { DCHECK(output); NotificationDatabaseDataProto message; if (!message.ParseFromString(input)) return false; output->notification_id = message.notification_id(); output->origin = GURL(message.origin()); output->service_worker_registration_id = message.service_worker_registration_id(); PlatformNotificationData* notification_data = &output->notification_data; const NotificationDatabaseDataProto::NotificationData& payload = message.notification_data(); notification_data->title = base::UTF8ToUTF16(payload.title()); switch (payload.direction()) { case NotificationDatabaseDataProto::NotificationData::LEFT_TO_RIGHT: notification_data->direction = PlatformNotificationData::DIRECTION_LEFT_TO_RIGHT; break; case NotificationDatabaseDataProto::NotificationData::RIGHT_TO_LEFT: notification_data->direction = PlatformNotificationData::DIRECTION_RIGHT_TO_LEFT; break; case NotificationDatabaseDataProto::NotificationData::AUTO: notification_data->direction = PlatformNotificationData::DIRECTION_AUTO; break; } notification_data->lang = payload.lang(); notification_data->body = base::UTF8ToUTF16(payload.body()); notification_data->tag = payload.tag(); notification_data->icon = GURL(payload.icon()); if (payload.vibration_pattern().size() > 0) { notification_data->vibration_pattern.assign( payload.vibration_pattern().begin(), payload.vibration_pattern().end()); } notification_data->timestamp = base::Time::FromInternalValue(payload.timestamp()); notification_data->silent = payload.silent(); notification_data->require_interaction = payload.require_interaction(); if (payload.data().length()) { notification_data->data.assign(payload.data().begin(), payload.data().end()); } for (const auto& payload_action : payload.actions()) { PlatformNotificationAction action; action.action = payload_action.action(); action.title = base::UTF8ToUTF16(payload_action.title()); action.icon = GURL(payload_action.icon()); notification_data->actions.push_back(action); } return true; }
171,629
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting) { const struct k_clock *kc = timr->kclock; ktime_t now, remaining, iv; struct timespec64 ts64; bool sig_none; sig_none = timr->it_sigev_notify == SIGEV_NONE; iv = timr->it_interval; /* interval timer ? */ if (iv) { cur_setting->it_interval = ktime_to_timespec64(iv); } else if (!timr->it_active) { /* * SIGEV_NONE oneshot timers are never queued. Check them * below. */ if (!sig_none) return; } /* * The timespec64 based conversion is suboptimal, but it's not * worth to implement yet another callback. */ kc->clock_get(timr->it_clock, &ts64); now = timespec64_to_ktime(ts64); /* * When a requeue is pending or this is a SIGEV_NONE timer move the * expiry time forward by intervals, so expiry is > now. */ if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none)) timr->it_overrun += (int)kc->timer_forward(timr, now); remaining = kc->timer_remaining(timr, now); /* Return 0 only, when the timer is expired and not pending */ if (remaining <= 0) { /* * A single shot SIGEV_NONE timer must return 0, when * it is expired ! */ if (!sig_none) cur_setting->it_value.tv_nsec = 1; } else { cur_setting->it_value = ktime_to_timespec64(remaining); } } Commit Message: posix-timers: Sanitize overrun handling The posix timer overrun handling is broken because the forwarding functions can return a huge number of overruns which does not fit in an int. As a consequence timer_getoverrun(2) and siginfo::si_overrun can turn into random number generators. The k_clock::timer_forward() callbacks return a 64 bit value now. Make k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal accounting is correct. 3Remove the temporary (int) casts. Add a helper function which clamps the overrun value returned to user space via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value between 0 and INT_MAX. INT_MAX is an indicator for user space that the overrun value has been clamped. Reported-by: Team OWL337 <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Acked-by: John Stultz <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Michael Kerrisk <[email protected]> Link: https://lkml.kernel.org/r/[email protected] CWE ID: CWE-190
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting) { const struct k_clock *kc = timr->kclock; ktime_t now, remaining, iv; struct timespec64 ts64; bool sig_none; sig_none = timr->it_sigev_notify == SIGEV_NONE; iv = timr->it_interval; /* interval timer ? */ if (iv) { cur_setting->it_interval = ktime_to_timespec64(iv); } else if (!timr->it_active) { /* * SIGEV_NONE oneshot timers are never queued. Check them * below. */ if (!sig_none) return; } /* * The timespec64 based conversion is suboptimal, but it's not * worth to implement yet another callback. */ kc->clock_get(timr->it_clock, &ts64); now = timespec64_to_ktime(ts64); /* * When a requeue is pending or this is a SIGEV_NONE timer move the * expiry time forward by intervals, so expiry is > now. */ if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none)) timr->it_overrun += kc->timer_forward(timr, now); remaining = kc->timer_remaining(timr, now); /* Return 0 only, when the timer is expired and not pending */ if (remaining <= 0) { /* * A single shot SIGEV_NONE timer must return 0, when * it is expired ! */ if (!sig_none) cur_setting->it_value.tv_nsec = 1; } else { cur_setting->it_value = ktime_to_timespec64(remaining); } }
169,180
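The posix-timers row above keeps the overrun counter in 64 bits inside the kernel and saturates it only at the user-space boundary, with INT_MAX doubling as the "value was clamped" indicator described in the commit message. Here is a minimal stand-alone sketch of that clamping step; the helper name and the demo values are invented for illustration and this is not the actual kernel helper.

/*
 * Clamp a 64-bit running overrun count to [0, INT_MAX] at the point where
 * it is reported to user space. INT_MAX signals saturation.
 */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static int overrun_to_user(int64_t overrun)
{
    if (overrun < 0)
        return 0;
    if (overrun > INT_MAX)
        return INT_MAX;                /* tells user space the value was clamped */
    return (int)overrun;
}

int main(void)
{
    int64_t samples[] = { -5, 0, 42, (int64_t)INT_MAX + 1000 };
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%lld -> %d\n", (long long)samples[i], overrun_to_user(samples[i]));
    return 0;
}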
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void InspectorClientImpl::clearBrowserCookies() { if (WebDevToolsAgentImpl* agent = devToolsAgent()) agent->clearBrowserCookies(); } Commit Message: [4/4] Process clearBrowserCahce/cookies commands in browser. BUG=366585 Review URL: https://codereview.chromium.org/251183005 git-svn-id: svn://svn.chromium.org/blink/trunk@172984 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID:
void InspectorClientImpl::clearBrowserCookies()
171,347
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool ExtensionService::IsDownloadFromGallery(const GURL& download_url, const GURL& referrer_url) { if (IsDownloadFromMiniGallery(download_url) && StartsWithASCII(referrer_url.spec(), extension_urls::kMiniGalleryBrowsePrefix, false)) { return true; } const Extension* download_extension = GetExtensionByWebExtent(download_url); const Extension* referrer_extension = GetExtensionByWebExtent(referrer_url); const Extension* webstore_app = GetWebStoreApp(); bool referrer_valid = (referrer_extension == webstore_app); bool download_valid = (download_extension == webstore_app); GURL store_url = GURL(CommandLine::ForCurrentProcess()->GetSwitchValueASCII( switches::kAppsGalleryURL)); if (!store_url.is_empty()) { std::string store_tld = net::RegistryControlledDomainService::GetDomainAndRegistry(store_url); if (!referrer_valid) { std::string referrer_tld = net::RegistryControlledDomainService::GetDomainAndRegistry( referrer_url); referrer_valid = referrer_url.is_empty() || (referrer_tld == store_tld); } if (!download_valid) { std::string download_tld = net::RegistryControlledDomainService::GetDomainAndRegistry( download_url); download_valid = (download_tld == store_tld); } } return (referrer_valid && download_valid); } Commit Message: Limit extent of webstore app to just chrome.google.com/webstore. BUG=93497 TEST=Try installing extensions and apps from the webstore, starting both being initially logged in, and not. Review URL: http://codereview.chromium.org/7719003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@97986 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-264
bool ExtensionService::IsDownloadFromGallery(const GURL& download_url, const GURL& referrer_url) { if (IsDownloadFromMiniGallery(download_url) && StartsWithASCII(referrer_url.spec(), extension_urls::kMiniGalleryBrowsePrefix, false)) { return true; } const Extension* download_extension = GetExtensionByWebExtent(download_url); const Extension* referrer_extension = GetExtensionByWebExtent(referrer_url); const Extension* webstore_app = GetWebStoreApp(); bool referrer_valid = (referrer_extension == webstore_app); bool download_valid = (download_extension == webstore_app); // We also allow the download to be from a small set of trusted paths. if (!download_valid) { for (size_t i = 0; i < arraysize(kAllowedDownloadURLPatterns); i++) { URLPattern pattern(URLPattern::SCHEME_HTTPS, kAllowedDownloadURLPatterns[i]); if (pattern.MatchesURL(download_url)) { download_valid = true; break; } } } GURL store_url = GURL(CommandLine::ForCurrentProcess()->GetSwitchValueASCII( switches::kAppsGalleryURL)); if (!store_url.is_empty()) { std::string store_tld = net::RegistryControlledDomainService::GetDomainAndRegistry(store_url); if (!referrer_valid) { std::string referrer_tld = net::RegistryControlledDomainService::GetDomainAndRegistry( referrer_url); referrer_valid = referrer_url.is_empty() || (referrer_tld == store_tld); } if (!download_valid) { std::string download_tld = net::RegistryControlledDomainService::GetDomainAndRegistry( download_url); download_valid = (download_tld == store_tld); } } return (referrer_valid && download_valid); }
170,320
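The ExtensionService fix above additionally accepts downloads whose URL matches a small hard-coded set of trusted HTTPS patterns (kAllowedDownloadURLPatterns, matched with URLPattern). The sketch below reduces that to plain prefix matching in C just to show the allowlist shape; the prefixes are placeholders, not Chromium's real patterns, and real pattern matching is richer than strncmp.

/*
 * Allowlist check: a download URL is accepted if it starts with one of a
 * fixed set of trusted HTTPS prefixes. Placeholder data, illustrative only.
 */
#include <stdio.h>
#include <string.h>

static const char *const allowed_prefixes[] = {
    "https://clients2.example.com/service/update2/",
    "https://chrome.example.com/webstore/download/",
};

static int download_url_allowed(const char *url)
{
    for (size_t i = 0; i < sizeof(allowed_prefixes) / sizeof(allowed_prefixes[0]); i++) {
        if (strncmp(url, allowed_prefixes[i], strlen(allowed_prefixes[i])) == 0)
            return 1;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", download_url_allowed("https://clients2.example.com/service/update2/crx?x=1"));
    printf("%d\n", download_url_allowed("http://evil.example.net/payload.crx"));
    return 0;
}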
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void FillConstant(uint8_t *data, int stride, uint8_t fill_constant) { for (int h = 0; h < height_; ++h) { for (int w = 0; w < width_; ++w) { data[h * stride + w] = fill_constant; } } } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
void FillConstant(uint8_t *data, int stride, uint8_t fill_constant) { // Sum of Absolute Differences Average. Given two blocks, and a prediction // calculate the absolute difference between one pixel and average of the // corresponding and predicted pixels; accumulate. unsigned int ReferenceSADavg(int block_idx) { unsigned int sad = 0; const uint8_t *const reference8 = GetReference(block_idx); const uint8_t *const source8 = source_data_; const uint8_t *const second_pred8 = second_pred_; #if CONFIG_VP9_HIGHBITDEPTH const uint16_t *const reference16 = CONVERT_TO_SHORTPTR(GetReference(block_idx)); const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_); const uint16_t *const second_pred16 = CONVERT_TO_SHORTPTR(second_pred_); #endif // CONFIG_VP9_HIGHBITDEPTH for (int h = 0; h < height_; ++h) { for (int w = 0; w < width_; ++w) { if (!use_high_bit_depth_) { const int tmp = second_pred8[h * width_ + w] + reference8[h * reference_stride_ + w]; const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1); sad += abs(source8[h * source_stride_ + w] - comp_pred); #if CONFIG_VP9_HIGHBITDEPTH } else { const int tmp = second_pred16[h * width_ + w] + reference16[h * reference_stride_ + w]; const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1); sad += abs(source16[h * source_stride_ + w] - comp_pred); #endif // CONFIG_VP9_HIGHBITDEPTH } } } return sad; } void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) { uint8_t *data8 = data; #if CONFIG_VP9_HIGHBITDEPTH uint16_t *data16 = CONVERT_TO_SHORTPTR(data); #endif // CONFIG_VP9_HIGHBITDEPTH for (int h = 0; h < height_; ++h) { for (int w = 0; w < width_; ++w) { if (!use_high_bit_depth_) { data8[h * stride + w] = static_cast<uint8_t>(fill_constant); #if CONFIG_VP9_HIGHBITDEPTH } else { data16[h * stride + w] = fill_constant; #endif // CONFIG_VP9_HIGHBITDEPTH } } } }
174,571
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: dissect_u3v(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data) { gint offset = 0; proto_tree *u3v_tree = NULL, *ccd_tree_flag, *u3v_telegram_tree = NULL, *ccd_tree = NULL; gint data_length = 0; gint req_id = 0; gint command_id = -1; gint status = 0; guint prefix = 0; proto_item *ti = NULL; proto_item *item = NULL; const char *command_string; usb_conv_info_t *usb_conv_info; gint stream_detected = FALSE; gint control_detected = FALSE; u3v_conv_info_t *u3v_conv_info = NULL; gencp_transaction_t *gencp_trans = NULL; usb_conv_info = (usb_conv_info_t *)data; /* decide if this packet belongs to U3V protocol */ u3v_conv_info = (u3v_conv_info_t *)usb_conv_info->class_data; if (!u3v_conv_info) { u3v_conv_info = wmem_new0(wmem_file_scope(), u3v_conv_info_t); usb_conv_info->class_data = u3v_conv_info; } prefix = tvb_get_letohl(tvb, 0); if ((tvb_reported_length(tvb) >= 4) && ( ( U3V_CONTROL_PREFIX == prefix ) || ( U3V_EVENT_PREFIX == prefix ) ) ) { control_detected = TRUE; } if (((tvb_reported_length(tvb) >= 4) && (( U3V_STREAM_LEADER_PREFIX == prefix ) || ( U3V_STREAM_TRAILER_PREFIX == prefix ))) || (usb_conv_info->endpoint == u3v_conv_info->ep_stream)) { stream_detected = TRUE; } /* initialize interface class/subclass in case no descriptors have been dissected yet */ if ( control_detected || stream_detected){ if ( usb_conv_info->interfaceClass == IF_CLASS_UNKNOWN && usb_conv_info->interfaceSubclass == IF_SUBCLASS_UNKNOWN){ usb_conv_info->interfaceClass = IF_CLASS_MISCELLANEOUS; usb_conv_info->interfaceSubclass = IF_SUBCLASS_MISC_U3V; } } if ( control_detected ) { /* Set the protocol column */ col_set_str(pinfo->cinfo, COL_PROTOCOL, "U3V"); /* Clear out stuff in the info column */ col_clear(pinfo->cinfo, COL_INFO); /* Adds "USB3Vision" heading to protocol tree */ /* We will add fields to this using the u3v_tree pointer */ ti = proto_tree_add_item(tree, proto_u3v, tvb, offset, -1, ENC_NA); u3v_tree = proto_item_add_subtree(ti, ett_u3v); prefix = tvb_get_letohl(tvb, offset); command_id = tvb_get_letohs(tvb, offset+6); /* decode CCD ( DCI/DCE command data layout) */ if ((prefix == U3V_CONTROL_PREFIX || prefix == U3V_EVENT_PREFIX) && ((command_id % 2) == 0)) { command_string = val_to_str(command_id,command_names,"Unknown Command (0x%x)"); item = proto_tree_add_item(u3v_tree, hf_u3v_ccd_cmd, tvb, offset, 8, ENC_NA); proto_item_append_text(item, ": %s", command_string); ccd_tree = proto_item_add_subtree(item, ett_u3v_cmd); /* Add the prefix code: */ proto_tree_add_item(ccd_tree, hf_u3v_gencp_prefix, tvb, offset, 4, ENC_LITTLE_ENDIAN); offset += 4; /* Add the flags */ item = proto_tree_add_item(ccd_tree, hf_u3v_flag, tvb, offset, 2, ENC_LITTLE_ENDIAN); ccd_tree_flag = proto_item_add_subtree(item, ett_u3v_flags); proto_tree_add_item(ccd_tree_flag, hf_u3v_acknowledge_required_flag, tvb, offset, 2, ENC_LITTLE_ENDIAN); offset += 2; col_append_fstr(pinfo->cinfo, COL_INFO, "> %s ", command_string); } else if (prefix == U3V_CONTROL_PREFIX && ((command_id % 2) == 1)) { command_string = val_to_str(command_id,command_names,"Unknown Acknowledge (0x%x)"); item = proto_tree_add_item(u3v_tree, hf_u3v_ccd_ack, tvb, offset, 8, ENC_NA); proto_item_append_text(item, ": %s", command_string); ccd_tree = proto_item_add_subtree(item, ett_u3v_ack); /* Add the prefix code: */ proto_tree_add_item(ccd_tree, hf_u3v_gencp_prefix, tvb, offset, 4, ENC_LITTLE_ENDIAN); offset += 4; /* Add the status: */ proto_tree_add_item(ccd_tree, hf_u3v_status, tvb, offset, 2,ENC_LITTLE_ENDIAN); status = tvb_get_letohs(tvb, offset); 
offset += 2; col_append_fstr(pinfo->cinfo, COL_INFO, "< %s %s", command_string, val_to_str(status, status_names_short, "Unknown status (0x%04X)")); } else { return 0; } /* Add the command id*/ proto_tree_add_item(ccd_tree, hf_u3v_command_id, tvb, offset, 2,ENC_LITTLE_ENDIAN); offset += 2; /* Parse the second part of both the command and the acknowledge header: 0 15 16 31 -------- -------- -------- -------- | status | acknowledge | -------- -------- -------- -------- | length | req_id | -------- -------- -------- -------- Add the data length Number of valid data bytes in this message, not including this header. This represents the number of bytes of payload appended after this header */ proto_tree_add_item(ccd_tree, hf_u3v_length, tvb, offset, 2, ENC_LITTLE_ENDIAN); data_length = tvb_get_letohs(tvb, offset); offset += 2; /* Add the request ID */ proto_tree_add_item(ccd_tree, hf_u3v_request_id, tvb, offset, 2, ENC_LITTLE_ENDIAN); req_id = tvb_get_letohs(tvb, offset); offset += 2; /* Add telegram subtree */ u3v_telegram_tree = proto_item_add_subtree(u3v_tree, ett_u3v); if (!PINFO_FD_VISITED(pinfo)) { if ((command_id % 2) == 0) { /* This is a command */ gencp_trans = wmem_new(wmem_file_scope(), gencp_transaction_t); gencp_trans->cmd_frame = pinfo->fd->num; gencp_trans->ack_frame = 0; gencp_trans->cmd_time = pinfo->fd->abs_ts; /* add reference to current packet */ p_add_proto_data(wmem_file_scope(), pinfo, proto_u3v, req_id, gencp_trans); /* add reference to current */ u3v_conv_info->trans_info = gencp_trans; } else { gencp_trans = u3v_conv_info->trans_info; if (gencp_trans) { gencp_trans->ack_frame = pinfo->fd->num; /* add reference to current packet */ p_add_proto_data(wmem_file_scope(), pinfo, proto_u3v, req_id, gencp_trans); } } } else { gencp_trans = (gencp_transaction_t*)p_get_proto_data(wmem_file_scope(),pinfo, proto_u3v, req_id); } if (!gencp_trans) { /* create a "fake" gencp_trans structure */ gencp_trans = wmem_new(wmem_packet_scope(), gencp_transaction_t); gencp_trans->cmd_frame = 0; gencp_trans->ack_frame = 0; gencp_trans->cmd_time = pinfo->fd->abs_ts; } /* dissect depending on command? 
*/ switch (command_id) { case U3V_READMEM_CMD: dissect_u3v_read_mem_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans); break; case U3V_WRITEMEM_CMD: dissect_u3v_write_mem_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans); break; case U3V_EVENT_CMD: dissect_u3v_event_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length); break; case U3V_READMEM_ACK: if ( U3V_STATUS_GENCP_SUCCESS == status ) { dissect_u3v_read_mem_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans); } break; case U3V_WRITEMEM_ACK: dissect_u3v_write_mem_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length, u3v_conv_info,gencp_trans); break; case U3V_PENDING_ACK: dissect_u3v_pending_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length, u3v_conv_info,gencp_trans); break; default: proto_tree_add_item(u3v_telegram_tree, hf_u3v_payloaddata, tvb, offset, data_length, ENC_NA); break; } return data_length + 12; } else if ( stream_detected ) { /* this is streaming data */ /* init this stream configuration */ u3v_conv_info = (u3v_conv_info_t *)usb_conv_info->class_data; u3v_conv_info->ep_stream = usb_conv_info->endpoint; /* Set the protocol column */ col_set_str(pinfo->cinfo, COL_PROTOCOL, "U3V"); /* Clear out stuff in the info column */ col_clear(pinfo->cinfo, COL_INFO); /* Adds "USB3Vision" heading to protocol tree */ /* We will add fields to this using the u3v_tree pointer */ ti = proto_tree_add_item(tree, proto_u3v, tvb, offset, -1, ENC_NA); u3v_tree = proto_item_add_subtree(ti, ett_u3v); if(tvb_captured_length(tvb) >=4) { prefix = tvb_get_letohl(tvb, offset); switch (prefix) { case U3V_STREAM_LEADER_PREFIX: dissect_u3v_stream_leader(u3v_tree, tvb, pinfo, usb_conv_info); break; case U3V_STREAM_TRAILER_PREFIX: dissect_u3v_stream_trailer(u3v_tree, tvb, pinfo, usb_conv_info); break; default: dissect_u3v_stream_payload(u3v_tree, tvb, pinfo, usb_conv_info); break; } } return tvb_captured_length(tvb); } return 0; } Commit Message: Make class "type" for USB conversations. USB dissectors can't assume that only their class type has been passed around in the conversation. Make explicit check that class type expected matches the dissector and stop/prevent dissection if there isn't a match. Bug: 12356 Change-Id: Ib23973a4ebd0fbb51952ffc118daf95e3389a209 Reviewed-on: https://code.wireshark.org/review/15212 Petri-Dish: Michael Mann <[email protected]> Reviewed-by: Martin Kaiser <[email protected]> Petri-Dish: Martin Kaiser <[email protected]> Tested-by: Petri Dish Buildbot <[email protected]> Reviewed-by: Michael Mann <[email protected]> CWE ID: CWE-476
dissect_u3v(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data) { gint offset = 0; proto_tree *u3v_tree = NULL, *ccd_tree_flag, *u3v_telegram_tree = NULL, *ccd_tree = NULL; gint data_length = 0; gint req_id = 0; gint command_id = -1; gint status = 0; guint prefix = 0; proto_item *ti = NULL; proto_item *item = NULL; const char *command_string; usb_conv_info_t *usb_conv_info; gint stream_detected = FALSE; gint control_detected = FALSE; u3v_conv_info_t *u3v_conv_info = NULL; gencp_transaction_t *gencp_trans = NULL; usb_conv_info = (usb_conv_info_t *)data; /* decide if this packet belongs to U3V protocol */ u3v_conv_info = (u3v_conv_info_t *)usb_conv_info->class_data; if (!u3v_conv_info) { u3v_conv_info = wmem_new0(wmem_file_scope(), u3v_conv_info_t); usb_conv_info->class_data = u3v_conv_info; usb_conv_info->class_data_type = USB_CONV_U3V; } else if (usb_conv_info->class_data_type != USB_CONV_U3V) { /* Don't dissect if another USB type is in the conversation */ return 0; } prefix = tvb_get_letohl(tvb, 0); if ((tvb_reported_length(tvb) >= 4) && ( ( U3V_CONTROL_PREFIX == prefix ) || ( U3V_EVENT_PREFIX == prefix ) ) ) { control_detected = TRUE; } if (((tvb_reported_length(tvb) >= 4) && (( U3V_STREAM_LEADER_PREFIX == prefix ) || ( U3V_STREAM_TRAILER_PREFIX == prefix ))) || (usb_conv_info->endpoint == u3v_conv_info->ep_stream)) { stream_detected = TRUE; } /* initialize interface class/subclass in case no descriptors have been dissected yet */ if ( control_detected || stream_detected){ if ( usb_conv_info->interfaceClass == IF_CLASS_UNKNOWN && usb_conv_info->interfaceSubclass == IF_SUBCLASS_UNKNOWN){ usb_conv_info->interfaceClass = IF_CLASS_MISCELLANEOUS; usb_conv_info->interfaceSubclass = IF_SUBCLASS_MISC_U3V; } } if ( control_detected ) { /* Set the protocol column */ col_set_str(pinfo->cinfo, COL_PROTOCOL, "U3V"); /* Clear out stuff in the info column */ col_clear(pinfo->cinfo, COL_INFO); /* Adds "USB3Vision" heading to protocol tree */ /* We will add fields to this using the u3v_tree pointer */ ti = proto_tree_add_item(tree, proto_u3v, tvb, offset, -1, ENC_NA); u3v_tree = proto_item_add_subtree(ti, ett_u3v); prefix = tvb_get_letohl(tvb, offset); command_id = tvb_get_letohs(tvb, offset+6); /* decode CCD ( DCI/DCE command data layout) */ if ((prefix == U3V_CONTROL_PREFIX || prefix == U3V_EVENT_PREFIX) && ((command_id % 2) == 0)) { command_string = val_to_str(command_id,command_names,"Unknown Command (0x%x)"); item = proto_tree_add_item(u3v_tree, hf_u3v_ccd_cmd, tvb, offset, 8, ENC_NA); proto_item_append_text(item, ": %s", command_string); ccd_tree = proto_item_add_subtree(item, ett_u3v_cmd); /* Add the prefix code: */ proto_tree_add_item(ccd_tree, hf_u3v_gencp_prefix, tvb, offset, 4, ENC_LITTLE_ENDIAN); offset += 4; /* Add the flags */ item = proto_tree_add_item(ccd_tree, hf_u3v_flag, tvb, offset, 2, ENC_LITTLE_ENDIAN); ccd_tree_flag = proto_item_add_subtree(item, ett_u3v_flags); proto_tree_add_item(ccd_tree_flag, hf_u3v_acknowledge_required_flag, tvb, offset, 2, ENC_LITTLE_ENDIAN); offset += 2; col_append_fstr(pinfo->cinfo, COL_INFO, "> %s ", command_string); } else if (prefix == U3V_CONTROL_PREFIX && ((command_id % 2) == 1)) { command_string = val_to_str(command_id,command_names,"Unknown Acknowledge (0x%x)"); item = proto_tree_add_item(u3v_tree, hf_u3v_ccd_ack, tvb, offset, 8, ENC_NA); proto_item_append_text(item, ": %s", command_string); ccd_tree = proto_item_add_subtree(item, ett_u3v_ack); /* Add the prefix code: */ proto_tree_add_item(ccd_tree, hf_u3v_gencp_prefix, tvb, offset, 4, 
ENC_LITTLE_ENDIAN); offset += 4; /* Add the status: */ proto_tree_add_item(ccd_tree, hf_u3v_status, tvb, offset, 2,ENC_LITTLE_ENDIAN); status = tvb_get_letohs(tvb, offset); offset += 2; col_append_fstr(pinfo->cinfo, COL_INFO, "< %s %s", command_string, val_to_str(status, status_names_short, "Unknown status (0x%04X)")); } else { return 0; } /* Add the command id*/ proto_tree_add_item(ccd_tree, hf_u3v_command_id, tvb, offset, 2,ENC_LITTLE_ENDIAN); offset += 2; /* Parse the second part of both the command and the acknowledge header: 0 15 16 31 -------- -------- -------- -------- | status | acknowledge | -------- -------- -------- -------- | length | req_id | -------- -------- -------- -------- Add the data length Number of valid data bytes in this message, not including this header. This represents the number of bytes of payload appended after this header */ proto_tree_add_item(ccd_tree, hf_u3v_length, tvb, offset, 2, ENC_LITTLE_ENDIAN); data_length = tvb_get_letohs(tvb, offset); offset += 2; /* Add the request ID */ proto_tree_add_item(ccd_tree, hf_u3v_request_id, tvb, offset, 2, ENC_LITTLE_ENDIAN); req_id = tvb_get_letohs(tvb, offset); offset += 2; /* Add telegram subtree */ u3v_telegram_tree = proto_item_add_subtree(u3v_tree, ett_u3v); if (!PINFO_FD_VISITED(pinfo)) { if ((command_id % 2) == 0) { /* This is a command */ gencp_trans = wmem_new(wmem_file_scope(), gencp_transaction_t); gencp_trans->cmd_frame = pinfo->fd->num; gencp_trans->ack_frame = 0; gencp_trans->cmd_time = pinfo->fd->abs_ts; /* add reference to current packet */ p_add_proto_data(wmem_file_scope(), pinfo, proto_u3v, req_id, gencp_trans); /* add reference to current */ u3v_conv_info->trans_info = gencp_trans; } else { gencp_trans = u3v_conv_info->trans_info; if (gencp_trans) { gencp_trans->ack_frame = pinfo->fd->num; /* add reference to current packet */ p_add_proto_data(wmem_file_scope(), pinfo, proto_u3v, req_id, gencp_trans); } } } else { gencp_trans = (gencp_transaction_t*)p_get_proto_data(wmem_file_scope(),pinfo, proto_u3v, req_id); } if (!gencp_trans) { /* create a "fake" gencp_trans structure */ gencp_trans = wmem_new(wmem_packet_scope(), gencp_transaction_t); gencp_trans->cmd_frame = 0; gencp_trans->ack_frame = 0; gencp_trans->cmd_time = pinfo->fd->abs_ts; } /* dissect depending on command? 
*/ switch (command_id) { case U3V_READMEM_CMD: dissect_u3v_read_mem_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans); break; case U3V_WRITEMEM_CMD: dissect_u3v_write_mem_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans); break; case U3V_EVENT_CMD: dissect_u3v_event_cmd(u3v_telegram_tree, tvb, pinfo, offset, data_length); break; case U3V_READMEM_ACK: if ( U3V_STATUS_GENCP_SUCCESS == status ) { dissect_u3v_read_mem_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length,u3v_conv_info,gencp_trans); } break; case U3V_WRITEMEM_ACK: dissect_u3v_write_mem_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length, u3v_conv_info,gencp_trans); break; case U3V_PENDING_ACK: dissect_u3v_pending_ack(u3v_telegram_tree, tvb, pinfo, offset, data_length, u3v_conv_info,gencp_trans); break; default: proto_tree_add_item(u3v_telegram_tree, hf_u3v_payloaddata, tvb, offset, data_length, ENC_NA); break; } return data_length + 12; } else if ( stream_detected ) { /* this is streaming data */ /* init this stream configuration */ u3v_conv_info = (u3v_conv_info_t *)usb_conv_info->class_data; u3v_conv_info->ep_stream = usb_conv_info->endpoint; /* Set the protocol column */ col_set_str(pinfo->cinfo, COL_PROTOCOL, "U3V"); /* Clear out stuff in the info column */ col_clear(pinfo->cinfo, COL_INFO); /* Adds "USB3Vision" heading to protocol tree */ /* We will add fields to this using the u3v_tree pointer */ ti = proto_tree_add_item(tree, proto_u3v, tvb, offset, -1, ENC_NA); u3v_tree = proto_item_add_subtree(ti, ett_u3v); if(tvb_captured_length(tvb) >=4) { prefix = tvb_get_letohl(tvb, offset); switch (prefix) { case U3V_STREAM_LEADER_PREFIX: dissect_u3v_stream_leader(u3v_tree, tvb, pinfo, usb_conv_info); break; case U3V_STREAM_TRAILER_PREFIX: dissect_u3v_stream_trailer(u3v_tree, tvb, pinfo, usb_conv_info); break; default: dissect_u3v_stream_payload(u3v_tree, tvb, pinfo, usb_conv_info); break; } } return tvb_captured_length(tvb); } return 0; }
167,152
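The Wireshark fix above tags the per-conversation class_data with an explicit type (USB_CONV_U3V) and bails out of dissection when the stored data belongs to another class, instead of blindly casting it. The C sketch below shows that "tag your shared blob before reinterpreting it" pattern with made-up names; it is not Wireshark code.

/*
 * Tagged per-connection data: a consumer that finds data of the wrong type
 * refuses to reinterpret it. Illustrative names only.
 */
#include <stdio.h>
#include <stdlib.h>

enum blob_type { BLOB_NONE = 0, BLOB_U3V, BLOB_AUDIO };

struct conv {
    enum blob_type type;
    void *data;
};

struct u3v_state { int stream_endpoint; };

static struct u3v_state *get_u3v_state(struct conv *c)
{
    if (c->data == NULL) {                    /* first touch: claim the slot */
        struct u3v_state *st = calloc(1, sizeof(*st));
        c->data = st;
        c->type = BLOB_U3V;
        return st;
    }
    if (c->type != BLOB_U3V)                  /* owned by another class */
        return NULL;                          /* caller must stop, not cast */
    return c->data;
}

int main(void)
{
    struct conv c1 = { BLOB_NONE, NULL };
    printf("fresh conv   -> %s\n", get_u3v_state(&c1) ? "u3v state" : "refused");

    struct conv c2 = { BLOB_AUDIO, &c2 };     /* already claimed by another class */
    printf("foreign conv -> %s\n", get_u3v_state(&c2) ? "u3v state" : "refused");

    free(c1.data);
    return 0;
}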
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) { struct au1100fb_device *fbdev; unsigned int len; unsigned long start=0, off; fbdev = to_au1100fb_device(fbi); if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) { return -EINVAL; } start = fbdev->fb_phys & PAGE_MASK; len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len); off = vma->vm_pgoff << PAGE_SHIFT; if ((vma->vm_end - vma->vm_start + off) > len) { return -EINVAL; } off += start; vma->vm_pgoff = off >> PAGE_SHIFT; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) { return -EAGAIN; } return 0; } Commit Message: Fix a few incorrectly checked [io_]remap_pfn_range() calls Nico Golde reports a few straggling uses of [io_]remap_pfn_range() that really should use the vm_iomap_memory() helper. This trivially converts two of them to the helper, and comments about why the third one really needs to continue to use remap_pfn_range(), and adds the missing size check. Reported-by: Nico Golde <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]. CWE ID: CWE-119
int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma) { struct au1100fb_device *fbdev; fbdev = to_au1100fb_device(fbi); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6 return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len); }
165,935
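The au1100fb fix above replaces the hand-rolled remap (which lacked a size check) with the vm_iomap_memory() helper named in the commit message. The bounds arithmetic that helper performs, and that the old path was missing, is sketched below in plain C; note the subtraction form, which also avoids overflow in off + len. Names and values are illustrative only.

/*
 * The requested window [off, off + len) must fit inside the device memory
 * before any remapping happens.
 */
#include <stdio.h>
#include <stdint.h>

static int check_map_window(uint64_t region_len, uint64_t off, uint64_t len)
{
    if (off > region_len)
        return -1;                     /* offset already past the end */
    if (len > region_len - off)
        return -1;                     /* window runs past the end, no overflow */
    return 0;                          /* ok to map [off, off + len) */
}

int main(void)
{
    printf("%d\n", check_map_window(4096, 0, 4096));     /* 0: exact fit */
    printf("%d\n", check_map_window(4096, 1024, 4096));  /* -1: too long */
    printf("%d\n", check_map_window(4096, 8192, 16));    /* -1: bad offset */
    return 0;
}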
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void sas_resume_port(struct asd_sas_phy *phy) { struct domain_device *dev; struct asd_sas_port *port = phy->port; struct sas_ha_struct *sas_ha = phy->ha; struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt); if (si->dft->lldd_port_formed) si->dft->lldd_port_formed(phy); if (port->suspended) port->suspended = 0; else { /* we only need to handle "link returned" actions once */ return; } /* if the port came back: * 1/ presume every device came back * 2/ force the next revalidation to check all expander phys */ list_for_each_entry(dev, &port->dev_list, dev_list_node) { int i, rc; rc = sas_notify_lldd_dev_found(dev); if (rc) { sas_unregister_dev(port, dev); continue; } if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { dev->ex_dev.ex_change_count = -1; for (i = 0; i < dev->ex_dev.num_phys; i++) { struct ex_phy *phy = &dev->ex_dev.ex_phy[i]; phy->phy_change_count = -1; } } } sas_discover_event(port, DISCE_RESUME); } Commit Message: scsi: libsas: direct call probe and destruct In commit 87c8331fcf72 ("[SCSI] libsas: prevent domain rediscovery competing with ata error handling") introduced disco mutex to prevent rediscovery competing with ata error handling and put the whole revalidation in the mutex. But the rphy add/remove needs to wait for the error handling which also grabs the disco mutex. This may leads to dead lock.So the probe and destruct event were introduce to do the rphy add/remove asynchronously and out of the lock. The asynchronously processed workers makes the whole discovery process not atomic, the other events may interrupt the process. For example, if a loss of signal event inserted before the probe event, the sas_deform_port() is called and the port will be deleted. And sas_port_delete() may run before the destruct event, but the port-x:x is the top parent of end device or expander. This leads to a kernel WARNING such as: [ 82.042979] sysfs group 'power' not found for kobject 'phy-1:0:22' [ 82.042983] ------------[ cut here ]------------ [ 82.042986] WARNING: CPU: 54 PID: 1714 at fs/sysfs/group.c:237 sysfs_remove_group+0x94/0xa0 [ 82.043059] Call trace: [ 82.043082] [<ffff0000082e7624>] sysfs_remove_group+0x94/0xa0 [ 82.043085] [<ffff00000864e320>] dpm_sysfs_remove+0x60/0x70 [ 82.043086] [<ffff00000863ee10>] device_del+0x138/0x308 [ 82.043089] [<ffff00000869a2d0>] sas_phy_delete+0x38/0x60 [ 82.043091] [<ffff00000869a86c>] do_sas_phy_delete+0x6c/0x80 [ 82.043093] [<ffff00000863dc20>] device_for_each_child+0x58/0xa0 [ 82.043095] [<ffff000008696f80>] sas_remove_children+0x40/0x50 [ 82.043100] [<ffff00000869d1bc>] sas_destruct_devices+0x64/0xa0 [ 82.043102] [<ffff0000080e93bc>] process_one_work+0x1fc/0x4b0 [ 82.043104] [<ffff0000080e96c0>] worker_thread+0x50/0x490 [ 82.043105] [<ffff0000080f0364>] kthread+0xfc/0x128 [ 82.043107] [<ffff0000080836c0>] ret_from_fork+0x10/0x50 Make probe and destruct a direct call in the disco and revalidate function, but put them outside the lock. The whole discovery or revalidate won't be interrupted by other events. And the DISCE_PROBE and DISCE_DESTRUCT event are deleted as a result of the direct call. Introduce a new list to destruct the sas_port and put the port delete after the destruct. This makes sure the right order of destroying the sysfs kobject and fix the warning above. In sas_ex_revalidate_domain() have a loop to find all broadcasted device, and sometimes we have a chance to find the same expander twice. 
Because the sas_port will be deleted at the end of the whole revalidate process, sas_port with the same name cannot be added before this. Otherwise the sysfs will complain of creating duplicate filename. Since the LLDD will send broadcast for every device change, we can only process one expander's revalidation. [mkp: kbuild test robot warning] Signed-off-by: Jason Yan <[email protected]> CC: John Garry <[email protected]> CC: Johannes Thumshirn <[email protected]> CC: Ewan Milne <[email protected]> CC: Christoph Hellwig <[email protected]> CC: Tomas Henzl <[email protected]> CC: Dan Williams <[email protected]> Reviewed-by: Hannes Reinecke <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]> CWE ID:
static void sas_resume_port(struct asd_sas_phy *phy) { struct domain_device *dev; struct asd_sas_port *port = phy->port; struct sas_ha_struct *sas_ha = phy->ha; struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt); if (si->dft->lldd_port_formed) si->dft->lldd_port_formed(phy); if (port->suspended) port->suspended = 0; else { /* we only need to handle "link returned" actions once */ return; } /* if the port came back: * 1/ presume every device came back * 2/ force the next revalidation to check all expander phys */ list_for_each_entry(dev, &port->dev_list, dev_list_node) { int i, rc; rc = sas_notify_lldd_dev_found(dev); if (rc) { sas_unregister_dev(port, dev); sas_destruct_devices(port); continue; } if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { dev->ex_dev.ex_change_count = -1; for (i = 0; i < dev->ex_dev.num_phys; i++) { struct ex_phy *phy = &dev->ex_dev.ex_phy[i]; phy->phy_change_count = -1; } } } sas_discover_event(port, DISCE_RESUME); }
169,395
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int use_env() { int indent; size_t flags = 0; json_t *json; json_error_t error; #ifdef _WIN32 /* On Windows, set stdout and stderr to binary mode to avoid outputting DOS line terminators */ _setmode(_fileno(stdout), _O_BINARY); _setmode(_fileno(stderr), _O_BINARY); #endif indent = getenv_int("JSON_INDENT"); if(indent < 0 || indent > 255) { fprintf(stderr, "invalid value for JSON_INDENT: %d\n", indent); return 2; } if(indent > 0) flags |= JSON_INDENT(indent); if(getenv_int("JSON_COMPACT") > 0) flags |= JSON_COMPACT; if(getenv_int("JSON_ENSURE_ASCII")) flags |= JSON_ENSURE_ASCII; if(getenv_int("JSON_PRESERVE_ORDER")) flags |= JSON_PRESERVE_ORDER; if(getenv_int("JSON_SORT_KEYS")) flags |= JSON_SORT_KEYS; if(getenv_int("STRIP")) { /* Load to memory, strip leading and trailing whitespace */ size_t size = 0, used = 0; char *buffer = NULL; while(1) { size_t count; size = (size == 0 ? 128 : size * 2); buffer = realloc(buffer, size); if(!buffer) { fprintf(stderr, "Unable to allocate %d bytes\n", (int)size); return 1; } count = fread(buffer + used, 1, size - used, stdin); if(count < size - used) { buffer[used + count] = '\0'; break; } used += count; } json = json_loads(strip(buffer), 0, &error); free(buffer); } else json = json_loadf(stdin, 0, &error); if(!json) { fprintf(stderr, "%d %d %d\n%s\n", error.line, error.column, error.position, error.text); return 1; } json_dumpf(json, stdout, flags); json_decref(json); return 0; } Commit Message: CVE-2013-6401: Change hash function, randomize hashes Thanks to Florian Weimer and Eric Sesterhenn for reporting, reviewing and testing. CWE ID: CWE-310
int use_env() { int indent; size_t flags = 0; json_t *json; json_error_t error; #ifdef _WIN32 /* On Windows, set stdout and stderr to binary mode to avoid outputting DOS line terminators */ _setmode(_fileno(stdout), _O_BINARY); _setmode(_fileno(stderr), _O_BINARY); #endif indent = getenv_int("JSON_INDENT"); if(indent < 0 || indent > 255) { fprintf(stderr, "invalid value for JSON_INDENT: %d\n", indent); return 2; } if(indent > 0) flags |= JSON_INDENT(indent); if(getenv_int("JSON_COMPACT") > 0) flags |= JSON_COMPACT; if(getenv_int("JSON_ENSURE_ASCII")) flags |= JSON_ENSURE_ASCII; if(getenv_int("JSON_PRESERVE_ORDER")) flags |= JSON_PRESERVE_ORDER; if(getenv_int("JSON_SORT_KEYS")) flags |= JSON_SORT_KEYS; if(getenv("HASHSEED")) json_object_seed(getenv_int("HASHSEED")); if(getenv_int("STRIP")) { /* Load to memory, strip leading and trailing whitespace */ size_t size = 0, used = 0; char *buffer = NULL; while(1) { size_t count; size = (size == 0 ? 128 : size * 2); buffer = realloc(buffer, size); if(!buffer) { fprintf(stderr, "Unable to allocate %d bytes\n", (int)size); return 1; } count = fread(buffer + used, 1, size - used, stdin); if(count < size - used) { buffer[used + count] = '\0'; break; } used += count; } json = json_loads(strip(buffer), 0, &error); free(buffer); } else json = json_loadf(stdin, 0, &error); if(!json) { fprintf(stderr, "%d %d %d\n%s\n", error.line, error.column, error.position, error.text); return 1; } json_dumpf(json, stdout, flags); json_decref(json); return 0; }
166,538
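The patched use_env() above only forwards a HASHSEED value; the actual CVE-2013-6401 hardening is jansson's switch to a randomized string hash, exposed to applications through json_object_seed() (available from roughly jansson 2.6 onward). A minimal standalone sketch of that API for a program parsing untrusted JSON — seeding with 0 lets the library pick a random per-process seed, while any fixed non-zero value keeps object iteration order reproducible:

#include <stdio.h>
#include <jansson.h>

int main(void)
{
    /* Must run before any other jansson call that builds objects:
     * 0 = random per-process seed (collision-DoS hardening),
     * non-zero = fixed seed for reproducible output, as HASHSEED does above. */
    json_object_seed(0);

    json_error_t error;
    json_t *json = json_loads("{\"a\": 1, \"b\": 2}", 0, &error);
    if (!json) {
        fprintf(stderr, "%d:%d %s\n", error.line, error.column, error.text);
        return 1;
    }
    json_dumpf(json, stdout, JSON_INDENT(2));
    json_decref(json);
    return 0;
}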
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: OMX_ERRORTYPE omx_video::get_parameter(OMX_IN OMX_HANDLETYPE hComp, OMX_IN OMX_INDEXTYPE paramIndex, OMX_INOUT OMX_PTR paramData) { (void)hComp; OMX_ERRORTYPE eRet = OMX_ErrorNone; unsigned int height=0,width = 0; DEBUG_PRINT_LOW("get_parameter:"); if (m_state == OMX_StateInvalid) { DEBUG_PRINT_ERROR("ERROR: Get Param in Invalid State"); return OMX_ErrorInvalidState; } if (paramData == NULL) { DEBUG_PRINT_ERROR("ERROR: Get Param in Invalid paramData"); return OMX_ErrorBadParameter; } switch ((int)paramIndex) { case OMX_IndexParamPortDefinition: { OMX_PARAM_PORTDEFINITIONTYPE *portDefn; portDefn = (OMX_PARAM_PORTDEFINITIONTYPE *) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamPortDefinition"); if (portDefn->nPortIndex == (OMX_U32) PORT_INDEX_IN) { dev_get_buf_req (&m_sInPortDef.nBufferCountMin, &m_sInPortDef.nBufferCountActual, &m_sInPortDef.nBufferSize, m_sInPortDef.nPortIndex); DEBUG_PRINT_LOW("m_sInPortDef: size = %u, min cnt = %u, actual cnt = %u", (unsigned int)m_sInPortDef.nBufferSize, (unsigned int)m_sInPortDef.nBufferCountMin, (unsigned int)m_sInPortDef.nBufferCountActual); memcpy(portDefn, &m_sInPortDef, sizeof(m_sInPortDef)); #ifdef _ANDROID_ICS_ if (meta_mode_enable) { portDefn->nBufferSize = sizeof(encoder_media_buffer_type); } if (mUseProxyColorFormat) { portDefn->format.video.eColorFormat = (OMX_COLOR_FORMATTYPE)QOMX_COLOR_FormatAndroidOpaque; } #endif } else if (portDefn->nPortIndex == (OMX_U32) PORT_INDEX_OUT) { if (m_state != OMX_StateExecuting) { dev_get_buf_req (&m_sOutPortDef.nBufferCountMin, &m_sOutPortDef.nBufferCountActual, &m_sOutPortDef.nBufferSize, m_sOutPortDef.nPortIndex); } DEBUG_PRINT_LOW("m_sOutPortDef: size = %u, min cnt = %u, actual cnt = %u", (unsigned int)m_sOutPortDef.nBufferSize, (unsigned int)m_sOutPortDef.nBufferCountMin, (unsigned int)m_sOutPortDef.nBufferCountActual); memcpy(portDefn, &m_sOutPortDef, sizeof(m_sOutPortDef)); } else { DEBUG_PRINT_ERROR("ERROR: GetParameter called on Bad Port Index"); eRet = OMX_ErrorBadPortIndex; } break; } case OMX_IndexParamVideoInit: { OMX_PORT_PARAM_TYPE *portParamType = (OMX_PORT_PARAM_TYPE *) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoInit"); memcpy(portParamType, &m_sPortParam, sizeof(m_sPortParam)); break; } case OMX_IndexParamVideoPortFormat: { OMX_VIDEO_PARAM_PORTFORMATTYPE *portFmt = (OMX_VIDEO_PARAM_PORTFORMATTYPE *)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoPortFormat"); if (portFmt->nPortIndex == (OMX_U32) PORT_INDEX_IN) { unsigned index = portFmt->nIndex; int supportedFormats[] = { [0] = QOMX_COLOR_FORMATYUV420PackedSemiPlanar32m, [1] = QOMX_COLOR_FormatAndroidOpaque, [2] = OMX_COLOR_FormatYUV420SemiPlanar, }; if (index > (sizeof(supportedFormats)/sizeof(*supportedFormats) - 1)) eRet = OMX_ErrorNoMore; else { memcpy(portFmt, &m_sInPortFormat, sizeof(m_sInPortFormat)); portFmt->nIndex = index; //restore index set from client portFmt->eColorFormat = (OMX_COLOR_FORMATTYPE)supportedFormats[index]; } } else if (portFmt->nPortIndex == (OMX_U32) PORT_INDEX_OUT) { memcpy(portFmt, &m_sOutPortFormat, sizeof(m_sOutPortFormat)); } else { DEBUG_PRINT_ERROR("ERROR: GetParameter called on Bad Port Index"); eRet = OMX_ErrorBadPortIndex; } break; } case OMX_IndexParamVideoBitrate: { OMX_VIDEO_PARAM_BITRATETYPE* pParam = (OMX_VIDEO_PARAM_BITRATETYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoBitrate"); if (pParam->nPortIndex == (OMX_U32) PORT_INDEX_OUT) { memcpy(pParam, &m_sParamBitrate, sizeof(m_sParamBitrate)); } else { DEBUG_PRINT_ERROR("ERROR: 
GetParameter called on Bad Port Index"); eRet = OMX_ErrorBadPortIndex; } break; } case OMX_IndexParamVideoMpeg4: { OMX_VIDEO_PARAM_MPEG4TYPE* pParam = (OMX_VIDEO_PARAM_MPEG4TYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoMpeg4"); memcpy(pParam, &m_sParamMPEG4, sizeof(m_sParamMPEG4)); break; } case OMX_IndexParamVideoH263: { OMX_VIDEO_PARAM_H263TYPE* pParam = (OMX_VIDEO_PARAM_H263TYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoH263"); memcpy(pParam, &m_sParamH263, sizeof(m_sParamH263)); break; } case OMX_IndexParamVideoAvc: { OMX_VIDEO_PARAM_AVCTYPE* pParam = (OMX_VIDEO_PARAM_AVCTYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoAvc"); memcpy(pParam, &m_sParamAVC, sizeof(m_sParamAVC)); break; } case (OMX_INDEXTYPE)OMX_IndexParamVideoVp8: { OMX_VIDEO_PARAM_VP8TYPE* pParam = (OMX_VIDEO_PARAM_VP8TYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoVp8"); memcpy(pParam, &m_sParamVP8, sizeof(m_sParamVP8)); break; } case (OMX_INDEXTYPE)OMX_IndexParamVideoHevc: { OMX_VIDEO_PARAM_HEVCTYPE* pParam = (OMX_VIDEO_PARAM_HEVCTYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoHevc"); memcpy(pParam, &m_sParamHEVC, sizeof(m_sParamHEVC)); break; } case OMX_IndexParamVideoProfileLevelQuerySupported: { OMX_VIDEO_PARAM_PROFILELEVELTYPE* pParam = (OMX_VIDEO_PARAM_PROFILELEVELTYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoProfileLevelQuerySupported"); eRet = get_supported_profile_level(pParam); if (eRet && eRet != OMX_ErrorNoMore) DEBUG_PRINT_ERROR("Invalid entry returned from get_supported_profile_level %u, %u", (unsigned int)pParam->eProfile, (unsigned int)pParam->eLevel); break; } case OMX_IndexParamVideoProfileLevelCurrent: { OMX_VIDEO_PARAM_PROFILELEVELTYPE* pParam = (OMX_VIDEO_PARAM_PROFILELEVELTYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoProfileLevelCurrent"); memcpy(pParam, &m_sParamProfileLevel, sizeof(m_sParamProfileLevel)); break; } /*Component should support this port definition*/ case OMX_IndexParamAudioInit: { OMX_PORT_PARAM_TYPE *audioPortParamType = (OMX_PORT_PARAM_TYPE *) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamAudioInit"); memcpy(audioPortParamType, &m_sPortParam_audio, sizeof(m_sPortParam_audio)); break; } /*Component should support this port definition*/ case OMX_IndexParamImageInit: { OMX_PORT_PARAM_TYPE *imagePortParamType = (OMX_PORT_PARAM_TYPE *) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamImageInit"); memcpy(imagePortParamType, &m_sPortParam_img, sizeof(m_sPortParam_img)); break; } /*Component should support this port definition*/ case OMX_IndexParamOtherInit: { DEBUG_PRINT_ERROR("ERROR: get_parameter: OMX_IndexParamOtherInit %08x", paramIndex); eRet =OMX_ErrorUnsupportedIndex; break; } case OMX_IndexParamStandardComponentRole: { OMX_PARAM_COMPONENTROLETYPE *comp_role; comp_role = (OMX_PARAM_COMPONENTROLETYPE *) paramData; comp_role->nVersion.nVersion = OMX_SPEC_VERSION; comp_role->nSize = sizeof(*comp_role); DEBUG_PRINT_LOW("Getparameter: OMX_IndexParamStandardComponentRole %d",paramIndex); strlcpy((char*)comp_role->cRole,(const char*)m_cRole,OMX_MAX_STRINGNAME_SIZE); break; } /* Added for parameter test */ case OMX_IndexParamPriorityMgmt: { OMX_PRIORITYMGMTTYPE *priorityMgmType = (OMX_PRIORITYMGMTTYPE *) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamPriorityMgmt"); memcpy(priorityMgmType, &m_sPriorityMgmt, sizeof(m_sPriorityMgmt)); break; } /* Added for parameter test */ case OMX_IndexParamCompBufferSupplier: { 
OMX_PARAM_BUFFERSUPPLIERTYPE *bufferSupplierType = (OMX_PARAM_BUFFERSUPPLIERTYPE*) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamCompBufferSupplier"); if (bufferSupplierType->nPortIndex ==(OMX_U32) PORT_INDEX_IN) { memcpy(bufferSupplierType, &m_sInBufSupplier, sizeof(m_sInBufSupplier)); } else if (bufferSupplierType->nPortIndex ==(OMX_U32) PORT_INDEX_OUT) { memcpy(bufferSupplierType, &m_sOutBufSupplier, sizeof(m_sOutBufSupplier)); } else { DEBUG_PRINT_ERROR("ERROR: GetParameter called on Bad Port Index"); eRet = OMX_ErrorBadPortIndex; } break; } case OMX_IndexParamVideoQuantization: { OMX_VIDEO_PARAM_QUANTIZATIONTYPE *session_qp = (OMX_VIDEO_PARAM_QUANTIZATIONTYPE*) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoQuantization"); memcpy(session_qp, &m_sSessionQuantization, sizeof(m_sSessionQuantization)); break; } case OMX_QcomIndexParamVideoQPRange: { OMX_QCOM_VIDEO_PARAM_QPRANGETYPE *qp_range = (OMX_QCOM_VIDEO_PARAM_QPRANGETYPE*) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamVideoQPRange"); memcpy(qp_range, &m_sSessionQPRange, sizeof(m_sSessionQPRange)); break; } case OMX_IndexParamVideoErrorCorrection: { OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE* errorresilience = (OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE*)paramData; DEBUG_PRINT_LOW("OMX_IndexParamVideoErrorCorrection"); errorresilience->bEnableHEC = m_sErrorCorrection.bEnableHEC; errorresilience->bEnableResync = m_sErrorCorrection.bEnableResync; errorresilience->nResynchMarkerSpacing = m_sErrorCorrection.nResynchMarkerSpacing; break; } case OMX_IndexParamVideoIntraRefresh: { OMX_VIDEO_PARAM_INTRAREFRESHTYPE* intrarefresh = (OMX_VIDEO_PARAM_INTRAREFRESHTYPE*)paramData; DEBUG_PRINT_LOW("OMX_IndexParamVideoIntraRefresh"); DEBUG_PRINT_ERROR("OMX_IndexParamVideoIntraRefresh GET"); intrarefresh->eRefreshMode = m_sIntraRefresh.eRefreshMode; intrarefresh->nCirMBs = m_sIntraRefresh.nCirMBs; break; } case OMX_QcomIndexPortDefn: break; case OMX_COMPONENT_CAPABILITY_TYPE_INDEX: { OMXComponentCapabilityFlagsType *pParam = reinterpret_cast<OMXComponentCapabilityFlagsType*>(paramData); DEBUG_PRINT_LOW("get_parameter: OMX_COMPONENT_CAPABILITY_TYPE_INDEX"); pParam->iIsOMXComponentMultiThreaded = OMX_TRUE; pParam->iOMXComponentSupportsExternalOutputBufferAlloc = OMX_FALSE; pParam->iOMXComponentSupportsExternalInputBufferAlloc = OMX_TRUE; pParam->iOMXComponentSupportsMovableInputBuffers = OMX_TRUE; pParam->iOMXComponentUsesNALStartCodes = OMX_TRUE; pParam->iOMXComponentSupportsPartialFrames = OMX_FALSE; pParam->iOMXComponentCanHandleIncompleteFrames = OMX_FALSE; pParam->iOMXComponentUsesFullAVCFrames = OMX_FALSE; m_use_input_pmem = OMX_TRUE; DEBUG_PRINT_LOW("Supporting capability index in encoder node"); break; } #if !defined(MAX_RES_720P) || defined(_MSM8974_) case OMX_QcomIndexParamIndexExtraDataType: { DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamIndexExtraDataType"); QOMX_INDEXEXTRADATATYPE *pParam = (QOMX_INDEXEXTRADATATYPE *)paramData; if (pParam->nIndex == (OMX_INDEXTYPE)OMX_ExtraDataVideoEncoderSliceInfo) { if (pParam->nPortIndex == PORT_INDEX_OUT) { pParam->bEnabled = (OMX_BOOL)(m_sExtraData & VEN_EXTRADATA_SLICEINFO); DEBUG_PRINT_HIGH("Slice Info extradata %d", pParam->bEnabled); } else { DEBUG_PRINT_ERROR("get_parameter: slice information is " "valid for output port only"); eRet =OMX_ErrorUnsupportedIndex; } } else if (pParam->nIndex == (OMX_INDEXTYPE)OMX_ExtraDataVideoEncoderMBInfo) { if (pParam->nPortIndex == PORT_INDEX_OUT) { pParam->bEnabled = (OMX_BOOL)(m_sExtraData & VEN_EXTRADATA_MBINFO); 
DEBUG_PRINT_HIGH("MB Info extradata %d", pParam->bEnabled); } else { DEBUG_PRINT_ERROR("get_parameter: MB information is " "valid for output port only"); eRet = OMX_ErrorUnsupportedIndex; } } #ifndef _MSM8974_ else if (pParam->nIndex == (OMX_INDEXTYPE)OMX_ExtraDataVideoLTRInfo) { if (pParam->nPortIndex == PORT_INDEX_OUT) { pParam->bEnabled = (OMX_BOOL)(m_sExtraData & VEN_EXTRADATA_LTRINFO); DEBUG_PRINT_HIGH("LTR Info extradata %d", pParam->bEnabled); } else { DEBUG_PRINT_ERROR("get_parameter: LTR information is " "valid for output port only"); eRet = OMX_ErrorUnsupportedIndex; } } #endif else { DEBUG_PRINT_ERROR("get_parameter: unsupported extradata index (0x%x)", pParam->nIndex); eRet = OMX_ErrorUnsupportedIndex; } break; } case QOMX_IndexParamVideoLTRCountRangeSupported: { DEBUG_PRINT_HIGH("get_parameter: QOMX_IndexParamVideoLTRCountRangeSupported"); QOMX_EXTNINDEX_RANGETYPE *pParam = (QOMX_EXTNINDEX_RANGETYPE *)paramData; if (pParam->nPortIndex == PORT_INDEX_OUT) { OMX_U32 min = 0, max = 0, step_size = 0; if (dev_get_capability_ltrcount(&min, &max, &step_size)) { pParam->nMin = min; pParam->nMax = max; pParam->nStepSize = step_size; } else { DEBUG_PRINT_ERROR("get_parameter: get_capability_ltrcount failed"); eRet = OMX_ErrorUndefined; } } else { DEBUG_PRINT_ERROR("LTR count range is valid for output port only"); eRet = OMX_ErrorUnsupportedIndex; } } break; case OMX_QcomIndexParamVideoLTRCount: { DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamVideoLTRCount"); OMX_QCOM_VIDEO_PARAM_LTRCOUNT_TYPE *pParam = reinterpret_cast<OMX_QCOM_VIDEO_PARAM_LTRCOUNT_TYPE*>(paramData); memcpy(pParam, &m_sParamLTRCount, sizeof(m_sParamLTRCount)); break; } #endif case QOMX_IndexParamVideoSyntaxHdr: { DEBUG_PRINT_HIGH("QOMX_IndexParamVideoSyntaxHdr"); QOMX_EXTNINDEX_PARAMTYPE* pParam = reinterpret_cast<QOMX_EXTNINDEX_PARAMTYPE*>(paramData); if (pParam->pData == NULL) { DEBUG_PRINT_ERROR("Error: Data buffer is NULL"); eRet = OMX_ErrorBadParameter; break; } if (get_syntaxhdr_enable == false) { DEBUG_PRINT_ERROR("ERROR: get_parameter: Get syntax header disabled"); eRet = OMX_ErrorUnsupportedIndex; break; } BITMASK_SET(&m_flags, OMX_COMPONENT_LOADED_START_PENDING); if (dev_loaded_start()) { DEBUG_PRINT_LOW("device start successful"); } else { DEBUG_PRINT_ERROR("device start failed"); BITMASK_CLEAR(&m_flags, OMX_COMPONENT_LOADED_START_PENDING); return OMX_ErrorHardware; } if (dev_get_seq_hdr(pParam->pData, (unsigned)(pParam->nSize - sizeof(QOMX_EXTNINDEX_PARAMTYPE)), (unsigned *)(void *)&pParam->nDataSize)) { DEBUG_PRINT_HIGH("get syntax header successful (hdrlen = %u)", (unsigned int)pParam->nDataSize); for (unsigned i = 0; i < pParam->nDataSize; i++) { DEBUG_PRINT_LOW("Header[%d] = %x", i, *((char *)pParam->pData + i)); } } else { DEBUG_PRINT_ERROR("Error returned from GetSyntaxHeader()"); eRet = OMX_ErrorHardware; } BITMASK_SET(&m_flags, OMX_COMPONENT_LOADED_STOP_PENDING); if (dev_loaded_stop()) { DEBUG_PRINT_LOW("device stop successful"); } else { DEBUG_PRINT_ERROR("device stop failed"); BITMASK_CLEAR(&m_flags, OMX_COMPONENT_LOADED_STOP_PENDING); eRet = OMX_ErrorHardware; } break; } case OMX_QcomIndexHierarchicalStructure: { QOMX_VIDEO_HIERARCHICALLAYERS* hierp = (QOMX_VIDEO_HIERARCHICALLAYERS*) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexHierarchicalStructure"); memcpy(hierp, &m_sHierLayers, sizeof(m_sHierLayers)); break; } case OMX_QcomIndexParamPerfLevel: { OMX_U32 perflevel; OMX_QCOM_VIDEO_PARAM_PERF_LEVEL *pParam = reinterpret_cast<OMX_QCOM_VIDEO_PARAM_PERF_LEVEL*>(paramData); 
DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamPerfLevel"); if (!dev_get_performance_level(&perflevel)) { DEBUG_PRINT_ERROR("Invalid entry returned from get_performance_level %d", pParam->ePerfLevel); } else { pParam->ePerfLevel = (QOMX_VIDEO_PERF_LEVEL)perflevel; } break; } case OMX_QcomIndexParamH264VUITimingInfo: { OMX_U32 enabled; OMX_QCOM_VIDEO_PARAM_VUI_TIMING_INFO *pParam = reinterpret_cast<OMX_QCOM_VIDEO_PARAM_VUI_TIMING_INFO*>(paramData); DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamH264VUITimingInfo"); if (!dev_get_vui_timing_info(&enabled)) { DEBUG_PRINT_ERROR("Invalid entry returned from get_vui_Timing_info %d", pParam->bEnable); } else { pParam->bEnable = (OMX_BOOL)enabled; } break; } case OMX_QcomIndexParamPeakBitrate: { OMX_U32 peakbitrate; OMX_QCOM_VIDEO_PARAM_PEAK_BITRATE *pParam = reinterpret_cast<OMX_QCOM_VIDEO_PARAM_PEAK_BITRATE*>(paramData); DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamPeakBitrate"); if (!dev_get_peak_bitrate(&peakbitrate)) { DEBUG_PRINT_ERROR("Invalid entry returned from get_peak_bitrate %u", (unsigned int)pParam->nPeakBitrate); } else { pParam->nPeakBitrate = peakbitrate; } break; } case QOMX_IndexParamVideoInitialQp: { QOMX_EXTNINDEX_VIDEO_INITIALQP* initqp = reinterpret_cast<QOMX_EXTNINDEX_VIDEO_INITIALQP *>(paramData); memcpy(initqp, &m_sParamInitqp, sizeof(m_sParamInitqp)); break; } case OMX_IndexParamVideoSliceFMO: default: { DEBUG_PRINT_LOW("ERROR: get_parameter: unknown param %08x", paramIndex); eRet =OMX_ErrorUnsupportedIndex; break; } } return eRet; } Commit Message: DO NOT MERGE mm-video-v4l2: vidc: validate omx param/config data Check the sanity of config/param strcuture objects passed to get/set _ config()/parameter() methods. Bug: 27533317 Security Vulnerability in MediaServer omx_vdec::get_config() Can lead to arbitrary write Change-Id: I6c3243afe12055ab94f1a1ecf758c10e88231809 Conflicts: mm-core/inc/OMX_QCOMExtns.h mm-video-v4l2/vidc/vdec/src/omx_vdec_msm8974.cpp mm-video-v4l2/vidc/venc/src/omx_video_base.cpp mm-video-v4l2/vidc/venc/src/omx_video_encoder.cpp CWE ID: CWE-20
OMX_ERRORTYPE omx_video::get_parameter(OMX_IN OMX_HANDLETYPE hComp, OMX_IN OMX_INDEXTYPE paramIndex, OMX_INOUT OMX_PTR paramData) { (void)hComp; OMX_ERRORTYPE eRet = OMX_ErrorNone; unsigned int height=0,width = 0; DEBUG_PRINT_LOW("get_parameter:"); if (m_state == OMX_StateInvalid) { DEBUG_PRINT_ERROR("ERROR: Get Param in Invalid State"); return OMX_ErrorInvalidState; } if (paramData == NULL) { DEBUG_PRINT_ERROR("ERROR: Get Param in Invalid paramData"); return OMX_ErrorBadParameter; } switch ((int)paramIndex) { case OMX_IndexParamPortDefinition: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_PARAM_PORTDEFINITIONTYPE); OMX_PARAM_PORTDEFINITIONTYPE *portDefn; portDefn = (OMX_PARAM_PORTDEFINITIONTYPE *) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamPortDefinition"); if (portDefn->nPortIndex == (OMX_U32) PORT_INDEX_IN) { dev_get_buf_req (&m_sInPortDef.nBufferCountMin, &m_sInPortDef.nBufferCountActual, &m_sInPortDef.nBufferSize, m_sInPortDef.nPortIndex); DEBUG_PRINT_LOW("m_sInPortDef: size = %u, min cnt = %u, actual cnt = %u", (unsigned int)m_sInPortDef.nBufferSize, (unsigned int)m_sInPortDef.nBufferCountMin, (unsigned int)m_sInPortDef.nBufferCountActual); memcpy(portDefn, &m_sInPortDef, sizeof(m_sInPortDef)); #ifdef _ANDROID_ICS_ if (meta_mode_enable) { portDefn->nBufferSize = sizeof(encoder_media_buffer_type); } if (mUseProxyColorFormat) { portDefn->format.video.eColorFormat = (OMX_COLOR_FORMATTYPE)QOMX_COLOR_FormatAndroidOpaque; } #endif } else if (portDefn->nPortIndex == (OMX_U32) PORT_INDEX_OUT) { if (m_state != OMX_StateExecuting) { dev_get_buf_req (&m_sOutPortDef.nBufferCountMin, &m_sOutPortDef.nBufferCountActual, &m_sOutPortDef.nBufferSize, m_sOutPortDef.nPortIndex); } DEBUG_PRINT_LOW("m_sOutPortDef: size = %u, min cnt = %u, actual cnt = %u", (unsigned int)m_sOutPortDef.nBufferSize, (unsigned int)m_sOutPortDef.nBufferCountMin, (unsigned int)m_sOutPortDef.nBufferCountActual); memcpy(portDefn, &m_sOutPortDef, sizeof(m_sOutPortDef)); } else { DEBUG_PRINT_ERROR("ERROR: GetParameter called on Bad Port Index"); eRet = OMX_ErrorBadPortIndex; } break; } case OMX_IndexParamVideoInit: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_PORT_PARAM_TYPE); OMX_PORT_PARAM_TYPE *portParamType = (OMX_PORT_PARAM_TYPE *) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoInit"); memcpy(portParamType, &m_sPortParam, sizeof(m_sPortParam)); break; } case OMX_IndexParamVideoPortFormat: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_PORTFORMATTYPE); OMX_VIDEO_PARAM_PORTFORMATTYPE *portFmt = (OMX_VIDEO_PARAM_PORTFORMATTYPE *)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoPortFormat"); if (portFmt->nPortIndex == (OMX_U32) PORT_INDEX_IN) { unsigned index = portFmt->nIndex; int supportedFormats[] = { [0] = QOMX_COLOR_FORMATYUV420PackedSemiPlanar32m, [1] = QOMX_COLOR_FormatAndroidOpaque, [2] = OMX_COLOR_FormatYUV420SemiPlanar, }; if (index > (sizeof(supportedFormats)/sizeof(*supportedFormats) - 1)) eRet = OMX_ErrorNoMore; else { memcpy(portFmt, &m_sInPortFormat, sizeof(m_sInPortFormat)); portFmt->nIndex = index; //restore index set from client portFmt->eColorFormat = (OMX_COLOR_FORMATTYPE)supportedFormats[index]; } } else if (portFmt->nPortIndex == (OMX_U32) PORT_INDEX_OUT) { memcpy(portFmt, &m_sOutPortFormat, sizeof(m_sOutPortFormat)); } else { DEBUG_PRINT_ERROR("ERROR: GetParameter called on Bad Port Index"); eRet = OMX_ErrorBadPortIndex; } break; } case OMX_IndexParamVideoBitrate: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_BITRATETYPE); OMX_VIDEO_PARAM_BITRATETYPE* pParam = 
(OMX_VIDEO_PARAM_BITRATETYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoBitrate"); if (pParam->nPortIndex == (OMX_U32) PORT_INDEX_OUT) { memcpy(pParam, &m_sParamBitrate, sizeof(m_sParamBitrate)); } else { DEBUG_PRINT_ERROR("ERROR: GetParameter called on Bad Port Index"); eRet = OMX_ErrorBadPortIndex; } break; } case OMX_IndexParamVideoMpeg4: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_MPEG4TYPE); OMX_VIDEO_PARAM_MPEG4TYPE* pParam = (OMX_VIDEO_PARAM_MPEG4TYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoMpeg4"); memcpy(pParam, &m_sParamMPEG4, sizeof(m_sParamMPEG4)); break; } case OMX_IndexParamVideoH263: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_H263TYPE); OMX_VIDEO_PARAM_H263TYPE* pParam = (OMX_VIDEO_PARAM_H263TYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoH263"); memcpy(pParam, &m_sParamH263, sizeof(m_sParamH263)); break; } case OMX_IndexParamVideoAvc: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_AVCTYPE); OMX_VIDEO_PARAM_AVCTYPE* pParam = (OMX_VIDEO_PARAM_AVCTYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoAvc"); memcpy(pParam, &m_sParamAVC, sizeof(m_sParamAVC)); break; } case (OMX_INDEXTYPE)OMX_IndexParamVideoVp8: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_VP8TYPE); OMX_VIDEO_PARAM_VP8TYPE* pParam = (OMX_VIDEO_PARAM_VP8TYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoVp8"); memcpy(pParam, &m_sParamVP8, sizeof(m_sParamVP8)); break; } case (OMX_INDEXTYPE)OMX_IndexParamVideoHevc: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_HEVCTYPE); OMX_VIDEO_PARAM_HEVCTYPE* pParam = (OMX_VIDEO_PARAM_HEVCTYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoHevc"); memcpy(pParam, &m_sParamHEVC, sizeof(m_sParamHEVC)); break; } case OMX_IndexParamVideoProfileLevelQuerySupported: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_PROFILELEVELTYPE); OMX_VIDEO_PARAM_PROFILELEVELTYPE* pParam = (OMX_VIDEO_PARAM_PROFILELEVELTYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoProfileLevelQuerySupported"); eRet = get_supported_profile_level(pParam); if (eRet && eRet != OMX_ErrorNoMore) DEBUG_PRINT_ERROR("Invalid entry returned from get_supported_profile_level %u, %u", (unsigned int)pParam->eProfile, (unsigned int)pParam->eLevel); break; } case OMX_IndexParamVideoProfileLevelCurrent: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_PROFILELEVELTYPE); OMX_VIDEO_PARAM_PROFILELEVELTYPE* pParam = (OMX_VIDEO_PARAM_PROFILELEVELTYPE*)paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoProfileLevelCurrent"); memcpy(pParam, &m_sParamProfileLevel, sizeof(m_sParamProfileLevel)); break; } /*Component should support this port definition*/ case OMX_IndexParamAudioInit: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_PORT_PARAM_TYPE); OMX_PORT_PARAM_TYPE *audioPortParamType = (OMX_PORT_PARAM_TYPE *) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamAudioInit"); memcpy(audioPortParamType, &m_sPortParam_audio, sizeof(m_sPortParam_audio)); break; } /*Component should support this port definition*/ case OMX_IndexParamImageInit: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_PORT_PARAM_TYPE); OMX_PORT_PARAM_TYPE *imagePortParamType = (OMX_PORT_PARAM_TYPE *) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamImageInit"); memcpy(imagePortParamType, &m_sPortParam_img, sizeof(m_sPortParam_img)); break; } /*Component should support this port definition*/ case OMX_IndexParamOtherInit: { DEBUG_PRINT_ERROR("ERROR: get_parameter: OMX_IndexParamOtherInit 
%08x", paramIndex); eRet =OMX_ErrorUnsupportedIndex; break; } case OMX_IndexParamStandardComponentRole: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_PARAM_COMPONENTROLETYPE); OMX_PARAM_COMPONENTROLETYPE *comp_role; comp_role = (OMX_PARAM_COMPONENTROLETYPE *) paramData; comp_role->nVersion.nVersion = OMX_SPEC_VERSION; comp_role->nSize = sizeof(*comp_role); DEBUG_PRINT_LOW("Getparameter: OMX_IndexParamStandardComponentRole %d",paramIndex); strlcpy((char*)comp_role->cRole,(const char*)m_cRole,OMX_MAX_STRINGNAME_SIZE); break; } /* Added for parameter test */ case OMX_IndexParamPriorityMgmt: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_PRIORITYMGMTTYPE); OMX_PRIORITYMGMTTYPE *priorityMgmType = (OMX_PRIORITYMGMTTYPE *) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamPriorityMgmt"); memcpy(priorityMgmType, &m_sPriorityMgmt, sizeof(m_sPriorityMgmt)); break; } /* Added for parameter test */ case OMX_IndexParamCompBufferSupplier: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_PARAM_BUFFERSUPPLIERTYPE); OMX_PARAM_BUFFERSUPPLIERTYPE *bufferSupplierType = (OMX_PARAM_BUFFERSUPPLIERTYPE*) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamCompBufferSupplier"); if (bufferSupplierType->nPortIndex ==(OMX_U32) PORT_INDEX_IN) { memcpy(bufferSupplierType, &m_sInBufSupplier, sizeof(m_sInBufSupplier)); } else if (bufferSupplierType->nPortIndex ==(OMX_U32) PORT_INDEX_OUT) { memcpy(bufferSupplierType, &m_sOutBufSupplier, sizeof(m_sOutBufSupplier)); } else { DEBUG_PRINT_ERROR("ERROR: GetParameter called on Bad Port Index"); eRet = OMX_ErrorBadPortIndex; } break; } case OMX_IndexParamVideoQuantization: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_QUANTIZATIONTYPE); OMX_VIDEO_PARAM_QUANTIZATIONTYPE *session_qp = (OMX_VIDEO_PARAM_QUANTIZATIONTYPE*) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_IndexParamVideoQuantization"); memcpy(session_qp, &m_sSessionQuantization, sizeof(m_sSessionQuantization)); break; } case OMX_QcomIndexParamVideoQPRange: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_QCOM_VIDEO_PARAM_QPRANGETYPE); OMX_QCOM_VIDEO_PARAM_QPRANGETYPE *qp_range = (OMX_QCOM_VIDEO_PARAM_QPRANGETYPE*) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamVideoQPRange"); memcpy(qp_range, &m_sSessionQPRange, sizeof(m_sSessionQPRange)); break; } case OMX_IndexParamVideoErrorCorrection: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE); OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE* errorresilience = (OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE*)paramData; DEBUG_PRINT_LOW("OMX_IndexParamVideoErrorCorrection"); errorresilience->bEnableHEC = m_sErrorCorrection.bEnableHEC; errorresilience->bEnableResync = m_sErrorCorrection.bEnableResync; errorresilience->nResynchMarkerSpacing = m_sErrorCorrection.nResynchMarkerSpacing; break; } case OMX_IndexParamVideoIntraRefresh: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_VIDEO_PARAM_INTRAREFRESHTYPE); OMX_VIDEO_PARAM_INTRAREFRESHTYPE* intrarefresh = (OMX_VIDEO_PARAM_INTRAREFRESHTYPE*)paramData; DEBUG_PRINT_LOW("OMX_IndexParamVideoIntraRefresh"); DEBUG_PRINT_ERROR("OMX_IndexParamVideoIntraRefresh GET"); intrarefresh->eRefreshMode = m_sIntraRefresh.eRefreshMode; intrarefresh->nCirMBs = m_sIntraRefresh.nCirMBs; break; } case OMX_QcomIndexPortDefn: break; case OMX_COMPONENT_CAPABILITY_TYPE_INDEX: { VALIDATE_OMX_PARAM_DATA(paramData, OMXComponentCapabilityFlagsType); OMXComponentCapabilityFlagsType *pParam = reinterpret_cast<OMXComponentCapabilityFlagsType*>(paramData); DEBUG_PRINT_LOW("get_parameter: OMX_COMPONENT_CAPABILITY_TYPE_INDEX"); pParam->iIsOMXComponentMultiThreaded = 
OMX_TRUE; pParam->iOMXComponentSupportsExternalOutputBufferAlloc = OMX_FALSE; pParam->iOMXComponentSupportsExternalInputBufferAlloc = OMX_TRUE; pParam->iOMXComponentSupportsMovableInputBuffers = OMX_TRUE; pParam->iOMXComponentUsesNALStartCodes = OMX_TRUE; pParam->iOMXComponentSupportsPartialFrames = OMX_FALSE; pParam->iOMXComponentCanHandleIncompleteFrames = OMX_FALSE; pParam->iOMXComponentUsesFullAVCFrames = OMX_FALSE; m_use_input_pmem = OMX_TRUE; DEBUG_PRINT_LOW("Supporting capability index in encoder node"); break; } #if !defined(MAX_RES_720P) || defined(_MSM8974_) case OMX_QcomIndexParamIndexExtraDataType: { VALIDATE_OMX_PARAM_DATA(paramData, QOMX_INDEXEXTRADATATYPE); DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamIndexExtraDataType"); QOMX_INDEXEXTRADATATYPE *pParam = (QOMX_INDEXEXTRADATATYPE *)paramData; if (pParam->nIndex == (OMX_INDEXTYPE)OMX_ExtraDataVideoEncoderSliceInfo) { if (pParam->nPortIndex == PORT_INDEX_OUT) { pParam->bEnabled = (OMX_BOOL)(m_sExtraData & VEN_EXTRADATA_SLICEINFO); DEBUG_PRINT_HIGH("Slice Info extradata %d", pParam->bEnabled); } else { DEBUG_PRINT_ERROR("get_parameter: slice information is " "valid for output port only"); eRet =OMX_ErrorUnsupportedIndex; } } else if (pParam->nIndex == (OMX_INDEXTYPE)OMX_ExtraDataVideoEncoderMBInfo) { if (pParam->nPortIndex == PORT_INDEX_OUT) { pParam->bEnabled = (OMX_BOOL)(m_sExtraData & VEN_EXTRADATA_MBINFO); DEBUG_PRINT_HIGH("MB Info extradata %d", pParam->bEnabled); } else { DEBUG_PRINT_ERROR("get_parameter: MB information is " "valid for output port only"); eRet = OMX_ErrorUnsupportedIndex; } } #ifndef _MSM8974_ else if (pParam->nIndex == (OMX_INDEXTYPE)OMX_ExtraDataVideoLTRInfo) { if (pParam->nPortIndex == PORT_INDEX_OUT) { pParam->bEnabled = (OMX_BOOL)(m_sExtraData & VEN_EXTRADATA_LTRINFO); DEBUG_PRINT_HIGH("LTR Info extradata %d", pParam->bEnabled); } else { DEBUG_PRINT_ERROR("get_parameter: LTR information is " "valid for output port only"); eRet = OMX_ErrorUnsupportedIndex; } } #endif else { DEBUG_PRINT_ERROR("get_parameter: unsupported extradata index (0x%x)", pParam->nIndex); eRet = OMX_ErrorUnsupportedIndex; } break; } case QOMX_IndexParamVideoLTRCountRangeSupported: { VALIDATE_OMX_PARAM_DATA(paramData, QOMX_EXTNINDEX_RANGETYPE); DEBUG_PRINT_HIGH("get_parameter: QOMX_IndexParamVideoLTRCountRangeSupported"); QOMX_EXTNINDEX_RANGETYPE *pParam = (QOMX_EXTNINDEX_RANGETYPE *)paramData; if (pParam->nPortIndex == PORT_INDEX_OUT) { OMX_U32 min = 0, max = 0, step_size = 0; if (dev_get_capability_ltrcount(&min, &max, &step_size)) { pParam->nMin = min; pParam->nMax = max; pParam->nStepSize = step_size; } else { DEBUG_PRINT_ERROR("get_parameter: get_capability_ltrcount failed"); eRet = OMX_ErrorUndefined; } } else { DEBUG_PRINT_ERROR("LTR count range is valid for output port only"); eRet = OMX_ErrorUnsupportedIndex; } } break; case OMX_QcomIndexParamVideoLTRCount: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_QCOM_VIDEO_PARAM_LTRCOUNT_TYPE); DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamVideoLTRCount"); OMX_QCOM_VIDEO_PARAM_LTRCOUNT_TYPE *pParam = reinterpret_cast<OMX_QCOM_VIDEO_PARAM_LTRCOUNT_TYPE*>(paramData); memcpy(pParam, &m_sParamLTRCount, sizeof(m_sParamLTRCount)); break; } #endif case QOMX_IndexParamVideoSyntaxHdr: { VALIDATE_OMX_PARAM_DATA(paramData, QOMX_EXTNINDEX_PARAMTYPE); DEBUG_PRINT_HIGH("QOMX_IndexParamVideoSyntaxHdr"); QOMX_EXTNINDEX_PARAMTYPE* pParam = reinterpret_cast<QOMX_EXTNINDEX_PARAMTYPE*>(paramData); if (pParam->pData == NULL) { DEBUG_PRINT_ERROR("Error: Data buffer is NULL"); eRet = 
OMX_ErrorBadParameter; break; } if (get_syntaxhdr_enable == false) { DEBUG_PRINT_ERROR("ERROR: get_parameter: Get syntax header disabled"); eRet = OMX_ErrorUnsupportedIndex; break; } BITMASK_SET(&m_flags, OMX_COMPONENT_LOADED_START_PENDING); if (dev_loaded_start()) { DEBUG_PRINT_LOW("device start successful"); } else { DEBUG_PRINT_ERROR("device start failed"); BITMASK_CLEAR(&m_flags, OMX_COMPONENT_LOADED_START_PENDING); return OMX_ErrorHardware; } if (dev_get_seq_hdr(pParam->pData, (unsigned)(pParam->nSize - sizeof(QOMX_EXTNINDEX_PARAMTYPE)), (unsigned *)(void *)&pParam->nDataSize)) { DEBUG_PRINT_HIGH("get syntax header successful (hdrlen = %u)", (unsigned int)pParam->nDataSize); for (unsigned i = 0; i < pParam->nDataSize; i++) { DEBUG_PRINT_LOW("Header[%d] = %x", i, *((char *)pParam->pData + i)); } } else { DEBUG_PRINT_ERROR("Error returned from GetSyntaxHeader()"); eRet = OMX_ErrorHardware; } BITMASK_SET(&m_flags, OMX_COMPONENT_LOADED_STOP_PENDING); if (dev_loaded_stop()) { DEBUG_PRINT_LOW("device stop successful"); } else { DEBUG_PRINT_ERROR("device stop failed"); BITMASK_CLEAR(&m_flags, OMX_COMPONENT_LOADED_STOP_PENDING); eRet = OMX_ErrorHardware; } break; } case OMX_QcomIndexHierarchicalStructure: { VALIDATE_OMX_PARAM_DATA(paramData, QOMX_VIDEO_HIERARCHICALLAYERS); QOMX_VIDEO_HIERARCHICALLAYERS* hierp = (QOMX_VIDEO_HIERARCHICALLAYERS*) paramData; DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexHierarchicalStructure"); memcpy(hierp, &m_sHierLayers, sizeof(m_sHierLayers)); break; } case OMX_QcomIndexParamPerfLevel: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_QCOM_VIDEO_PARAM_PERF_LEVEL); OMX_U32 perflevel; OMX_QCOM_VIDEO_PARAM_PERF_LEVEL *pParam = reinterpret_cast<OMX_QCOM_VIDEO_PARAM_PERF_LEVEL*>(paramData); DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamPerfLevel"); if (!dev_get_performance_level(&perflevel)) { DEBUG_PRINT_ERROR("Invalid entry returned from get_performance_level %d", pParam->ePerfLevel); } else { pParam->ePerfLevel = (QOMX_VIDEO_PERF_LEVEL)perflevel; } break; } case OMX_QcomIndexParamH264VUITimingInfo: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_QCOM_VIDEO_PARAM_VUI_TIMING_INFO); OMX_U32 enabled; OMX_QCOM_VIDEO_PARAM_VUI_TIMING_INFO *pParam = reinterpret_cast<OMX_QCOM_VIDEO_PARAM_VUI_TIMING_INFO*>(paramData); DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamH264VUITimingInfo"); if (!dev_get_vui_timing_info(&enabled)) { DEBUG_PRINT_ERROR("Invalid entry returned from get_vui_Timing_info %d", pParam->bEnable); } else { pParam->bEnable = (OMX_BOOL)enabled; } break; } case OMX_QcomIndexParamPeakBitrate: { VALIDATE_OMX_PARAM_DATA(paramData, OMX_QCOM_VIDEO_PARAM_PEAK_BITRATE); OMX_U32 peakbitrate; OMX_QCOM_VIDEO_PARAM_PEAK_BITRATE *pParam = reinterpret_cast<OMX_QCOM_VIDEO_PARAM_PEAK_BITRATE*>(paramData); DEBUG_PRINT_LOW("get_parameter: OMX_QcomIndexParamPeakBitrate"); if (!dev_get_peak_bitrate(&peakbitrate)) { DEBUG_PRINT_ERROR("Invalid entry returned from get_peak_bitrate %u", (unsigned int)pParam->nPeakBitrate); } else { pParam->nPeakBitrate = peakbitrate; } break; } case QOMX_IndexParamVideoInitialQp: { VALIDATE_OMX_PARAM_DATA(paramData, QOMX_EXTNINDEX_VIDEO_INITIALQP); QOMX_EXTNINDEX_VIDEO_INITIALQP* initqp = reinterpret_cast<QOMX_EXTNINDEX_VIDEO_INITIALQP *>(paramData); memcpy(initqp, &m_sParamInitqp, sizeof(m_sParamInitqp)); break; } case OMX_IndexParamVideoSliceFMO: default: { DEBUG_PRINT_LOW("ERROR: get_parameter: unknown param %08x", paramIndex); eRet =OMX_ErrorUnsupportedIndex; break; } } return eRet; }
173,794
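The fixed get_parameter() above guards every cast with VALIDATE_OMX_PARAM_DATA, whose definition is not part of this row. An illustrative reconstruction of what such a guard checks is below; it assumes the OMX_U32 type and OMX_ErrorBadParameter code from the OpenMAX IL headers plus the driver's own DEBUG_PRINT_ERROR macro, and the real AOSP macro may differ in detail (for instance in how strictly it compares nSize):

// Every OMX_* param/config struct begins with an OMX_U32 nSize field, so the
// size the caller claims can be validated before the pointer is cast and used.
#define VALIDATE_OMX_PARAM_DATA(ptr, paramType)                            \
    {                                                                      \
        if ((ptr) == NULL ||                                               \
            reinterpret_cast<OMX_U32 *>(ptr)[0] < sizeof(paramType)) {     \
            DEBUG_PRINT_ERROR("%s: insufficient param size", #paramType);  \
            return OMX_ErrorBadParameter;                                  \
        }                                                                  \
    }

Because the macro returns from the calling function, it can only be used inside methods that return OMX_ERRORTYPE, which is exactly how the fixed code applies it.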
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static ssize_t hfi1_file_write(struct file *fp, const char __user *data, size_t count, loff_t *offset) { const struct hfi1_cmd __user *ucmd; struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_cmd cmd; struct hfi1_user_info uinfo; struct hfi1_tid_info tinfo; unsigned long addr; ssize_t consumed = 0, copy = 0, ret = 0; void *dest = NULL; __u64 user_val = 0; int uctxt_required = 1; int must_be_root = 0; if (count < sizeof(cmd)) { ret = -EINVAL; goto bail; } ucmd = (const struct hfi1_cmd __user *)data; if (copy_from_user(&cmd, ucmd, sizeof(cmd))) { ret = -EFAULT; goto bail; } consumed = sizeof(cmd); switch (cmd.type) { case HFI1_CMD_ASSIGN_CTXT: uctxt_required = 0; /* assigned user context not required */ copy = sizeof(uinfo); dest = &uinfo; break; case HFI1_CMD_SDMA_STATUS_UPD: case HFI1_CMD_CREDIT_UPD: copy = 0; break; case HFI1_CMD_TID_UPDATE: case HFI1_CMD_TID_FREE: case HFI1_CMD_TID_INVAL_READ: copy = sizeof(tinfo); dest = &tinfo; break; case HFI1_CMD_USER_INFO: case HFI1_CMD_RECV_CTRL: case HFI1_CMD_POLL_TYPE: case HFI1_CMD_ACK_EVENT: case HFI1_CMD_CTXT_INFO: case HFI1_CMD_SET_PKEY: case HFI1_CMD_CTXT_RESET: copy = 0; user_val = cmd.addr; break; case HFI1_CMD_EP_INFO: case HFI1_CMD_EP_ERASE_CHIP: case HFI1_CMD_EP_ERASE_RANGE: case HFI1_CMD_EP_READ_RANGE: case HFI1_CMD_EP_WRITE_RANGE: uctxt_required = 0; /* assigned user context not required */ must_be_root = 1; /* validate user */ copy = 0; break; default: ret = -EINVAL; goto bail; } /* If the command comes with user data, copy it. */ if (copy) { if (copy_from_user(dest, (void __user *)cmd.addr, copy)) { ret = -EFAULT; goto bail; } consumed += copy; } /* * Make sure there is a uctxt when needed. */ if (uctxt_required && !uctxt) { ret = -EINVAL; goto bail; } /* only root can do these operations */ if (must_be_root && !capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto bail; } switch (cmd.type) { case HFI1_CMD_ASSIGN_CTXT: ret = assign_ctxt(fp, &uinfo); if (ret < 0) goto bail; ret = setup_ctxt(fp); if (ret) goto bail; ret = user_init(fp); break; case HFI1_CMD_CTXT_INFO: ret = get_ctxt_info(fp, (void __user *)(unsigned long) user_val, cmd.len); break; case HFI1_CMD_USER_INFO: ret = get_base_info(fp, (void __user *)(unsigned long) user_val, cmd.len); break; case HFI1_CMD_SDMA_STATUS_UPD: break; case HFI1_CMD_CREDIT_UPD: if (uctxt && uctxt->sc) sc_return_credits(uctxt->sc); break; case HFI1_CMD_TID_UPDATE: ret = hfi1_user_exp_rcv_setup(fp, &tinfo); if (!ret) { /* * Copy the number of tidlist entries we used * and the length of the buffer we registered. * These fields are adjacent in the structure so * we can copy them at the same time. 
*/ addr = (unsigned long)cmd.addr + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt) + sizeof(tinfo.length))) ret = -EFAULT; } break; case HFI1_CMD_TID_INVAL_READ: ret = hfi1_user_exp_rcv_invalid(fp, &tinfo); if (ret) break; addr = (unsigned long)cmd.addr + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) ret = -EFAULT; break; case HFI1_CMD_TID_FREE: ret = hfi1_user_exp_rcv_clear(fp, &tinfo); if (ret) break; addr = (unsigned long)cmd.addr + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) ret = -EFAULT; break; case HFI1_CMD_RECV_CTRL: ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val); break; case HFI1_CMD_POLL_TYPE: uctxt->poll_type = (typeof(uctxt->poll_type))user_val; break; case HFI1_CMD_ACK_EVENT: ret = user_event_ack(uctxt, fd->subctxt, user_val); break; case HFI1_CMD_SET_PKEY: if (HFI1_CAP_IS_USET(PKEY_CHECK)) ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val); else ret = -EPERM; break; case HFI1_CMD_CTXT_RESET: { struct send_context *sc; struct hfi1_devdata *dd; if (!uctxt || !uctxt->dd || !uctxt->sc) { ret = -EINVAL; break; } /* * There is no protection here. User level has to * guarantee that no one will be writing to the send * context while it is being re-initialized. * If user level breaks that guarantee, it will break * it's own context and no one else's. */ dd = uctxt->dd; sc = uctxt->sc; /* * Wait until the interrupt handler has marked the * context as halted or frozen. Report error if we time * out. */ wait_event_interruptible_timeout( sc->halt_wait, (sc->flags & SCF_HALTED), msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); if (!(sc->flags & SCF_HALTED)) { ret = -ENOLCK; break; } /* * If the send context was halted due to a Freeze, * wait until the device has been "unfrozen" before * resetting the context. */ if (sc->flags & SCF_FROZEN) { wait_event_interruptible_timeout( dd->event_queue, !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN), msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); if (dd->flags & HFI1_FROZEN) { ret = -ENOLCK; break; } if (dd->flags & HFI1_FORCED_FREEZE) { /* * Don't allow context reset if we are into * forced freeze */ ret = -ENODEV; break; } sc_disable(sc); ret = sc_enable(sc); hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt->ctxt); } else { ret = sc_restart(sc); } if (!ret) sc_return_credits(sc); break; } case HFI1_CMD_EP_INFO: case HFI1_CMD_EP_ERASE_CHIP: case HFI1_CMD_EP_ERASE_RANGE: case HFI1_CMD_EP_READ_RANGE: case HFI1_CMD_EP_WRITE_RANGE: ret = handle_eprom_command(fp, &cmd); break; } if (ret >= 0) ret = consumed; bail: return ret; } Commit Message: IB/security: Restrict use of the write() interface The drivers/infiniband stack uses write() as a replacement for bi-directional ioctl(). This is not safe. There are ways to trigger write calls that result in the return structure that is normally written to user space being shunted off to user specified kernel memory instead. For the immediate repair, detect and deny suspicious accesses to the write API. For long term, update the user space libraries and the kernel API to something that doesn't present the same security vulnerabilities (likely a structured ioctl() interface). The impacted uAPI interfaces are generally only available if hardware from drivers/infiniband is installed in the system. 
Reported-by: Jann Horn <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]> [ Expanded check to all known write() entry points ] Cc: [email protected] Signed-off-by: Doug Ledford <[email protected]> CWE ID: CWE-264
static ssize_t hfi1_file_write(struct file *fp, const char __user *data, size_t count, loff_t *offset) { const struct hfi1_cmd __user *ucmd; struct hfi1_filedata *fd = fp->private_data; struct hfi1_ctxtdata *uctxt = fd->uctxt; struct hfi1_cmd cmd; struct hfi1_user_info uinfo; struct hfi1_tid_info tinfo; unsigned long addr; ssize_t consumed = 0, copy = 0, ret = 0; void *dest = NULL; __u64 user_val = 0; int uctxt_required = 1; int must_be_root = 0; /* FIXME: This interface cannot continue out of staging */ if (WARN_ON_ONCE(!ib_safe_file_access(fp))) return -EACCES; if (count < sizeof(cmd)) { ret = -EINVAL; goto bail; } ucmd = (const struct hfi1_cmd __user *)data; if (copy_from_user(&cmd, ucmd, sizeof(cmd))) { ret = -EFAULT; goto bail; } consumed = sizeof(cmd); switch (cmd.type) { case HFI1_CMD_ASSIGN_CTXT: uctxt_required = 0; /* assigned user context not required */ copy = sizeof(uinfo); dest = &uinfo; break; case HFI1_CMD_SDMA_STATUS_UPD: case HFI1_CMD_CREDIT_UPD: copy = 0; break; case HFI1_CMD_TID_UPDATE: case HFI1_CMD_TID_FREE: case HFI1_CMD_TID_INVAL_READ: copy = sizeof(tinfo); dest = &tinfo; break; case HFI1_CMD_USER_INFO: case HFI1_CMD_RECV_CTRL: case HFI1_CMD_POLL_TYPE: case HFI1_CMD_ACK_EVENT: case HFI1_CMD_CTXT_INFO: case HFI1_CMD_SET_PKEY: case HFI1_CMD_CTXT_RESET: copy = 0; user_val = cmd.addr; break; case HFI1_CMD_EP_INFO: case HFI1_CMD_EP_ERASE_CHIP: case HFI1_CMD_EP_ERASE_RANGE: case HFI1_CMD_EP_READ_RANGE: case HFI1_CMD_EP_WRITE_RANGE: uctxt_required = 0; /* assigned user context not required */ must_be_root = 1; /* validate user */ copy = 0; break; default: ret = -EINVAL; goto bail; } /* If the command comes with user data, copy it. */ if (copy) { if (copy_from_user(dest, (void __user *)cmd.addr, copy)) { ret = -EFAULT; goto bail; } consumed += copy; } /* * Make sure there is a uctxt when needed. */ if (uctxt_required && !uctxt) { ret = -EINVAL; goto bail; } /* only root can do these operations */ if (must_be_root && !capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto bail; } switch (cmd.type) { case HFI1_CMD_ASSIGN_CTXT: ret = assign_ctxt(fp, &uinfo); if (ret < 0) goto bail; ret = setup_ctxt(fp); if (ret) goto bail; ret = user_init(fp); break; case HFI1_CMD_CTXT_INFO: ret = get_ctxt_info(fp, (void __user *)(unsigned long) user_val, cmd.len); break; case HFI1_CMD_USER_INFO: ret = get_base_info(fp, (void __user *)(unsigned long) user_val, cmd.len); break; case HFI1_CMD_SDMA_STATUS_UPD: break; case HFI1_CMD_CREDIT_UPD: if (uctxt && uctxt->sc) sc_return_credits(uctxt->sc); break; case HFI1_CMD_TID_UPDATE: ret = hfi1_user_exp_rcv_setup(fp, &tinfo); if (!ret) { /* * Copy the number of tidlist entries we used * and the length of the buffer we registered. * These fields are adjacent in the structure so * we can copy them at the same time. 
*/ addr = (unsigned long)cmd.addr + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt) + sizeof(tinfo.length))) ret = -EFAULT; } break; case HFI1_CMD_TID_INVAL_READ: ret = hfi1_user_exp_rcv_invalid(fp, &tinfo); if (ret) break; addr = (unsigned long)cmd.addr + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) ret = -EFAULT; break; case HFI1_CMD_TID_FREE: ret = hfi1_user_exp_rcv_clear(fp, &tinfo); if (ret) break; addr = (unsigned long)cmd.addr + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) ret = -EFAULT; break; case HFI1_CMD_RECV_CTRL: ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val); break; case HFI1_CMD_POLL_TYPE: uctxt->poll_type = (typeof(uctxt->poll_type))user_val; break; case HFI1_CMD_ACK_EVENT: ret = user_event_ack(uctxt, fd->subctxt, user_val); break; case HFI1_CMD_SET_PKEY: if (HFI1_CAP_IS_USET(PKEY_CHECK)) ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val); else ret = -EPERM; break; case HFI1_CMD_CTXT_RESET: { struct send_context *sc; struct hfi1_devdata *dd; if (!uctxt || !uctxt->dd || !uctxt->sc) { ret = -EINVAL; break; } /* * There is no protection here. User level has to * guarantee that no one will be writing to the send * context while it is being re-initialized. * If user level breaks that guarantee, it will break * it's own context and no one else's. */ dd = uctxt->dd; sc = uctxt->sc; /* * Wait until the interrupt handler has marked the * context as halted or frozen. Report error if we time * out. */ wait_event_interruptible_timeout( sc->halt_wait, (sc->flags & SCF_HALTED), msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); if (!(sc->flags & SCF_HALTED)) { ret = -ENOLCK; break; } /* * If the send context was halted due to a Freeze, * wait until the device has been "unfrozen" before * resetting the context. */ if (sc->flags & SCF_FROZEN) { wait_event_interruptible_timeout( dd->event_queue, !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN), msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); if (dd->flags & HFI1_FROZEN) { ret = -ENOLCK; break; } if (dd->flags & HFI1_FORCED_FREEZE) { /* * Don't allow context reset if we are into * forced freeze */ ret = -ENODEV; break; } sc_disable(sc); ret = sc_enable(sc); hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt->ctxt); } else { ret = sc_restart(sc); } if (!ret) sc_return_credits(sc); break; } case HFI1_CMD_EP_INFO: case HFI1_CMD_EP_ERASE_CHIP: case HFI1_CMD_EP_ERASE_RANGE: case HFI1_CMD_EP_READ_RANGE: case HFI1_CMD_EP_WRITE_RANGE: ret = handle_eprom_command(fp, &cmd); break; } if (ret >= 0) ret = consumed; bail: return ret; }
167,242
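The single guard added to hfi1_file_write() above delegates to ib_safe_file_access(), a helper introduced in include/rdma/ib.h by the same patch series and not shown in this row. A sketch of the check it performs — reproduced from memory, so details may differ — rejects write() requests that did not come straight from the task that opened the file with an ordinary user-space address limit (the splice()/KERNEL_DS paths that made write()-as-ioctl exploitable):

#include <linux/fs.h>
#include <linux/cred.h>
#include <linux/uaccess.h>

static inline bool ib_safe_file_access(struct file *filp)
{
	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
}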
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void WebGLRenderingContextBase::TexImageHelperHTMLVideoElement( const SecurityOrigin* security_origin, TexImageFunctionID function_id, GLenum target, GLint level, GLint internalformat, GLenum format, GLenum type, GLint xoffset, GLint yoffset, GLint zoffset, HTMLVideoElement* video, const IntRect& source_image_rect, GLsizei depth, GLint unpack_image_height, ExceptionState& exception_state) { const char* func_name = GetTexImageFunctionName(function_id); if (isContextLost()) return; if (!ValidateHTMLVideoElement(security_origin, func_name, video, exception_state)) return; WebGLTexture* texture = ValidateTexImageBinding(func_name, function_id, target); if (!texture) return; TexImageFunctionType function_type; if (function_id == kTexImage2D || function_id == kTexImage3D) function_type = kTexImage; else function_type = kTexSubImage; if (!ValidateTexFunc(func_name, function_type, kSourceHTMLVideoElement, target, level, internalformat, video->videoWidth(), video->videoHeight(), 1, 0, format, type, xoffset, yoffset, zoffset)) return; WebMediaPlayer::VideoFrameUploadMetadata frame_metadata = {}; int already_uploaded_id = -1; WebMediaPlayer::VideoFrameUploadMetadata* frame_metadata_ptr = nullptr; if (RuntimeEnabledFeatures::ExperimentalCanvasFeaturesEnabled()) { already_uploaded_id = texture->GetLastUploadedVideoFrameId(); frame_metadata_ptr = &frame_metadata; } bool source_image_rect_is_default = source_image_rect == SentinelEmptyRect() || source_image_rect == IntRect(0, 0, video->videoWidth(), video->videoHeight()); const bool use_copyTextureCHROMIUM = function_id == kTexImage2D && source_image_rect_is_default && depth == 1 && GL_TEXTURE_2D == target && CanUseTexImageByGPU(format, type); if (use_copyTextureCHROMIUM) { DCHECK_EQ(xoffset, 0); DCHECK_EQ(yoffset, 0); DCHECK_EQ(zoffset, 0); if (video->CopyVideoTextureToPlatformTexture( ContextGL(), target, texture->Object(), internalformat, format, type, level, unpack_premultiply_alpha_, unpack_flip_y_, already_uploaded_id, frame_metadata_ptr)) { texture->UpdateLastUploadedFrame(frame_metadata); return; } } if (source_image_rect_is_default) { ScopedUnpackParametersResetRestore( this, unpack_flip_y_ || unpack_premultiply_alpha_); if (video->TexImageImpl( static_cast<WebMediaPlayer::TexImageFunctionID>(function_id), target, ContextGL(), texture->Object(), level, ConvertTexInternalFormat(internalformat, type), format, type, xoffset, yoffset, zoffset, unpack_flip_y_, unpack_premultiply_alpha_ && unpack_colorspace_conversion_ == GL_NONE)) { texture->ClearLastUploadedFrame(); return; } } if (use_copyTextureCHROMIUM) { std::unique_ptr<ImageBufferSurface> surface = WTF::WrapUnique(new AcceleratedImageBufferSurface( IntSize(video->videoWidth(), video->videoHeight()))); if (surface->IsValid()) { std::unique_ptr<ImageBuffer> image_buffer( ImageBuffer::Create(std::move(surface))); if (image_buffer) { video->PaintCurrentFrame( image_buffer->Canvas(), IntRect(0, 0, video->videoWidth(), video->videoHeight()), nullptr, already_uploaded_id, frame_metadata_ptr); TexImage2DBase(target, level, internalformat, video->videoWidth(), video->videoHeight(), 0, format, type, nullptr); if (image_buffer->CopyToPlatformTexture( FunctionIDToSnapshotReason(function_id), ContextGL(), target, texture->Object(), unpack_premultiply_alpha_, unpack_flip_y_, IntPoint(0, 0), IntRect(0, 0, video->videoWidth(), video->videoHeight()))) { texture->UpdateLastUploadedFrame(frame_metadata); return; } } } } scoped_refptr<Image> image = VideoFrameToImage(video, already_uploaded_id, 
frame_metadata_ptr); if (!image) return; TexImageImpl(function_id, target, level, internalformat, xoffset, yoffset, zoffset, format, type, image.get(), WebGLImageConversion::kHtmlDomVideo, unpack_flip_y_, unpack_premultiply_alpha_, source_image_rect, depth, unpack_image_height); texture->UpdateLastUploadedFrame(frame_metadata); } Commit Message: Tighten about IntRect use in WebGL with overflow detection BUG=784183 TEST=test case in the bug in ASAN build [email protected] Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel Change-Id: Ie25ca328af99de7828e28e6a6e3d775f1bebc43f Reviewed-on: https://chromium-review.googlesource.com/811826 Reviewed-by: Kenneth Russell <[email protected]> Commit-Queue: Zhenyao Mo <[email protected]> Cr-Commit-Position: refs/heads/master@{#522213} CWE ID: CWE-125
void WebGLRenderingContextBase::TexImageHelperHTMLVideoElement( const SecurityOrigin* security_origin, TexImageFunctionID function_id, GLenum target, GLint level, GLint internalformat, GLenum format, GLenum type, GLint xoffset, GLint yoffset, GLint zoffset, HTMLVideoElement* video, const IntRect& source_image_rect, GLsizei depth, GLint unpack_image_height, ExceptionState& exception_state) { const char* func_name = GetTexImageFunctionName(function_id); if (isContextLost()) return; if (!ValidateHTMLVideoElement(security_origin, func_name, video, exception_state)) return; WebGLTexture* texture = ValidateTexImageBinding(func_name, function_id, target); if (!texture) return; TexImageFunctionType function_type; if (function_id == kTexImage2D || function_id == kTexImage3D) function_type = kTexImage; else function_type = kTexSubImage; if (!ValidateTexFunc(func_name, function_type, kSourceHTMLVideoElement, target, level, internalformat, video->videoWidth(), video->videoHeight(), 1, 0, format, type, xoffset, yoffset, zoffset)) return; WebMediaPlayer::VideoFrameUploadMetadata frame_metadata = {}; int already_uploaded_id = -1; WebMediaPlayer::VideoFrameUploadMetadata* frame_metadata_ptr = nullptr; if (RuntimeEnabledFeatures::ExperimentalCanvasFeaturesEnabled()) { already_uploaded_id = texture->GetLastUploadedVideoFrameId(); frame_metadata_ptr = &frame_metadata; } if (!source_image_rect.IsValid()) { SynthesizeGLError(GL_INVALID_OPERATION, func_name, "source sub-rectangle specified via pixel unpack " "parameters is invalid"); return; } bool source_image_rect_is_default = source_image_rect == SentinelEmptyRect() || source_image_rect == IntRect(0, 0, video->videoWidth(), video->videoHeight()); const bool use_copyTextureCHROMIUM = function_id == kTexImage2D && source_image_rect_is_default && depth == 1 && GL_TEXTURE_2D == target && CanUseTexImageByGPU(format, type); if (use_copyTextureCHROMIUM) { DCHECK_EQ(xoffset, 0); DCHECK_EQ(yoffset, 0); DCHECK_EQ(zoffset, 0); if (video->CopyVideoTextureToPlatformTexture( ContextGL(), target, texture->Object(), internalformat, format, type, level, unpack_premultiply_alpha_, unpack_flip_y_, already_uploaded_id, frame_metadata_ptr)) { texture->UpdateLastUploadedFrame(frame_metadata); return; } } if (source_image_rect_is_default) { ScopedUnpackParametersResetRestore( this, unpack_flip_y_ || unpack_premultiply_alpha_); if (video->TexImageImpl( static_cast<WebMediaPlayer::TexImageFunctionID>(function_id), target, ContextGL(), texture->Object(), level, ConvertTexInternalFormat(internalformat, type), format, type, xoffset, yoffset, zoffset, unpack_flip_y_, unpack_premultiply_alpha_ && unpack_colorspace_conversion_ == GL_NONE)) { texture->ClearLastUploadedFrame(); return; } } if (use_copyTextureCHROMIUM) { std::unique_ptr<ImageBufferSurface> surface = WTF::WrapUnique(new AcceleratedImageBufferSurface( IntSize(video->videoWidth(), video->videoHeight()))); if (surface->IsValid()) { std::unique_ptr<ImageBuffer> image_buffer( ImageBuffer::Create(std::move(surface))); if (image_buffer) { video->PaintCurrentFrame( image_buffer->Canvas(), IntRect(0, 0, video->videoWidth(), video->videoHeight()), nullptr, already_uploaded_id, frame_metadata_ptr); TexImage2DBase(target, level, internalformat, video->videoWidth(), video->videoHeight(), 0, format, type, nullptr); if (image_buffer->CopyToPlatformTexture( FunctionIDToSnapshotReason(function_id), ContextGL(), target, texture->Object(), unpack_premultiply_alpha_, unpack_flip_y_, IntPoint(0, 0), IntRect(0, 0, video->videoWidth(), 
video->videoHeight()))) { texture->UpdateLastUploadedFrame(frame_metadata); return; } } } } scoped_refptr<Image> image = VideoFrameToImage(video, already_uploaded_id, frame_metadata_ptr); if (!image) return; TexImageImpl(function_id, target, level, internalformat, xoffset, yoffset, zoffset, format, type, image.get(), WebGLImageConversion::kHtmlDomVideo, unpack_flip_y_, unpack_premultiply_alpha_, source_image_rect, depth, unpack_image_height); texture->UpdateLastUploadedFrame(frame_metadata); }
172,667
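The WebGL fix above refuses a source_image_rect whose coordinates would overflow before any pixels are touched; IntRect::IsValid() itself is not reproduced in this row. A hypothetical stand-in showing the kind of arithmetic such a helper has to guard — the name, fields, and exact rules here are assumptions, not Blink's implementation:

#include <cstdint>
#include <limits>

// A rectangle is usable only if its size is non-negative and its right and
// bottom edges (x + width, y + height) are still representable as int.
struct IntRectSketch {
  int x = 0, y = 0, width = 0, height = 0;

  bool IsValid() const {
    if (width < 0 || height < 0)
      return false;
    const int64_t right = static_cast<int64_t>(x) + width;
    const int64_t bottom = static_cast<int64_t>(y) + height;
    return right <= std::numeric_limits<int>::max() &&
           bottom <= std::numeric_limits<int>::max();
  }
};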
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct sco_options opts; struct sco_conninfo cinfo; int len, err = 0; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case SCO_OPTIONS: if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } opts.mtu = sco_pi(sk)->conn->mtu; BT_DBG("mtu %d", opts.mtu); len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *)&opts, len)) err = -EFAULT; break; case SCO_CONNINFO: if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); len = min_t(unsigned int, len, sizeof(cinfo)); if (copy_to_user(optval, (char *)&cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } Commit Message: Bluetooth: sco: fix information leak to userspace struct sco_conninfo has one padding byte in the end. Local variable cinfo of type sco_conninfo is copied to userspace with this uninizialized one byte, leading to old stack contents leak. Signed-off-by: Vasiliy Kulikov <[email protected]> Signed-off-by: Gustavo F. Padovan <[email protected]> CWE ID: CWE-200
static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct sco_options opts; struct sco_conninfo cinfo; int len, err = 0; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case SCO_OPTIONS: if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } opts.mtu = sco_pi(sk)->conn->mtu; BT_DBG("mtu %d", opts.mtu); len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *)&opts, len)) err = -EFAULT; break; case SCO_CONNINFO: if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); len = min_t(unsigned int, len, sizeof(cinfo)); if (copy_to_user(optval, (char *)&cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; }
165,898
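The SCO fix above works because the added `memset(&cinfo, 0, sizeof(cinfo))` clears the structure's trailing padding byte before the copy to user space, so stale stack bytes can no longer leak (CWE-200). The snippet below is a user-space sketch of the same idea, not kernel code: it uses `memcpy` into a local buffer instead of `copy_to_user`, and the struct layout is made up for illustration. Without the memset, the compiler-inserted padding after `dev_class` is handed out uninitialized.

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Illustrative struct: one byte of trailing padding after dev_class,
 * similar in spirit to sco_conninfo's layout. */
struct conn_info {
    uint16_t handle;
    uint8_t  dev_class[3];
    /* one byte of padding typically follows here */
};

static void fill_unsafe(struct conn_info *ci)
{
    /* Only the named fields are written; the padding byte keeps
     * whatever was already in memory. */
    ci->handle = 0x0042;
    memcpy(ci->dev_class, "\x01\x02\x03", 3);
}

static void fill_safe(struct conn_info *ci)
{
    memset(ci, 0, sizeof(*ci));   /* zero everything, padding included */
    ci->handle = 0x0042;
    memcpy(ci->dev_class, "\x01\x02\x03", 3);
}

static void dump(const char *label, const struct conn_info *ci)
{
    const unsigned char *p = (const unsigned char *)ci;
    printf("%s:", label);
    for (size_t i = 0; i < sizeof(*ci); i++)
        printf(" %02x", p[i]);
    printf("\n");
}

int main(void)
{
    struct conn_info ci;

    memset(&ci, 0xAA, sizeof(ci));  /* simulate stale stack contents */
    fill_unsafe(&ci);
    dump("unsafe (padding may expose 0xaa)", &ci);

    memset(&ci, 0xAA, sizeof(ci));
    fill_safe(&ci);
    dump("safe   (padding zeroed)         ", &ci);
    return 0;
}
```

The same rule applies to any struct copied across a privilege boundary: zero it in full before filling the fields, or copy field by field.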
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: static void *bpf_obj_do_get(const struct filename *pathname, enum bpf_type *type) { struct inode *inode; struct path path; void *raw; int ret; ret = kern_path(pathname->name, LOOKUP_FOLLOW, &path); if (ret) return ERR_PTR(ret); inode = d_backing_inode(path.dentry); ret = inode_permission(inode, MAY_WRITE); if (ret) goto out; ret = bpf_inode_type(inode, type); if (ret) goto out; raw = bpf_any_get(inode->i_private, *type); touch_atime(&path); path_put(&path); return raw; out: path_put(&path); return ERR_PTR(ret); } Commit Message: bpf: fix refcnt overflow On a system with >32Gbyte of phyiscal memory and infinite RLIMIT_MEMLOCK, the malicious application may overflow 32-bit bpf program refcnt. It's also possible to overflow map refcnt on 1Tb system. Impose 32k hard limit which means that the same bpf program or map cannot be shared by more than 32k processes. Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs") Reported-by: Jann Horn <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID:
static void *bpf_obj_do_get(const struct filename *pathname, enum bpf_type *type) { struct inode *inode; struct path path; void *raw; int ret; ret = kern_path(pathname->name, LOOKUP_FOLLOW, &path); if (ret) return ERR_PTR(ret); inode = d_backing_inode(path.dentry); ret = inode_permission(inode, MAY_WRITE); if (ret) goto out; ret = bpf_inode_type(inode, type); if (ret) goto out; raw = bpf_any_get(inode->i_private, *type); if (!IS_ERR(raw)) touch_atime(&path); path_put(&path); return raw; out: path_put(&path); return ERR_PTR(ret); }
167,251
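The bpf commit above caps program and map reference counts at a 32k hard limit so that a malicious user cannot wrap the 32-bit counter; the visible change to `bpf_obj_do_get` simply accounts for `bpf_any_get` now being able to return an error instead of always succeeding. The following is a hedged, user-space sketch of that hard-limit pattern. The helper names are illustrative, not the kernel's, and a real implementation would use atomic operations; only the 32768 cap is taken from the commit's stated limit.

```c
#include <errno.h>
#include <stdio.h>

#define MAX_REFCNT 32768  /* mirrors the "32k hard limit" from the commit */

struct object {
    int refcnt;           /* a real implementation would use atomics */
};

/* Take a reference, or fail with -EBUSY once the hard limit is reached,
 * so the counter can never overflow and wrap to zero. */
static int object_get(struct object *obj)
{
    if (obj->refcnt >= MAX_REFCNT)
        return -EBUSY;
    obj->refcnt++;
    return 0;
}

static void object_put(struct object *obj)
{
    if (--obj->refcnt == 0)
        printf("last reference dropped; object would be freed here\n");
}

int main(void)
{
    struct object obj = { .refcnt = 1 };
    int err = 0;

    /* Try to grab far more references than the limit allows. */
    for (long i = 0; i < 100000 && !err; i++)
        err = object_get(&obj);

    printf("stopped at refcnt=%d, err=%d\n", obj.refcnt, err);

    while (obj.refcnt > 0)
        object_put(&obj);
    return 0;
}
```

Because the acquire path can now fail, every caller has to check the result, which is exactly why the fixed `bpf_obj_do_get` guards `touch_atime` with `IS_ERR`.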
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: void PageHandler::SetRenderer(RenderProcessHost* process_host, RenderFrameHostImpl* frame_host) { if (host_ == frame_host) return; RenderWidgetHostImpl* widget_host = host_ ? host_->GetRenderWidgetHost() : nullptr; if (widget_host) { registrar_.Remove( this, content::NOTIFICATION_RENDER_WIDGET_VISIBILITY_CHANGED, content::Source<RenderWidgetHost>(widget_host)); } host_ = frame_host; widget_host = host_ ? host_->GetRenderWidgetHost() : nullptr; if (widget_host) { registrar_.Add( this, content::NOTIFICATION_RENDER_WIDGET_VISIBILITY_CHANGED, content::Source<RenderWidgetHost>(widget_host)); } } Commit Message: DevTools: speculative fix for crash in NetworkHandler::Disable This keeps BrowserContext* and StoragePartition* instead of RenderProcessHost* in an attemp to resolve UAF of RenderProcessHost upon closure of DevTools front-end. Bug: 801117, 783067, 780694 Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b Reviewed-on: https://chromium-review.googlesource.com/876657 Commit-Queue: Andrey Kosyakov <[email protected]> Reviewed-by: Dmitry Gozman <[email protected]> Cr-Commit-Position: refs/heads/master@{#531157} CWE ID: CWE-20
void PageHandler::SetRenderer(int process_host_id, RenderFrameHostImpl* frame_host) { if (host_ == frame_host) return; RenderWidgetHostImpl* widget_host = host_ ? host_->GetRenderWidgetHost() : nullptr; if (widget_host) { registrar_.Remove( this, content::NOTIFICATION_RENDER_WIDGET_VISIBILITY_CHANGED, content::Source<RenderWidgetHost>(widget_host)); } host_ = frame_host; widget_host = host_ ? host_->GetRenderWidgetHost() : nullptr; if (widget_host) { registrar_.Add( this, content::NOTIFICATION_RENDER_WIDGET_VISIBILITY_CHANGED, content::Source<RenderWidgetHost>(widget_host)); } }
172,764
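The DevTools fix above is described as keeping identifiers and context objects instead of a cached `RenderProcessHost*`, so a stale raw pointer can no longer be dereferenced after the process host is destroyed; accordingly, the fixed handler takes an `int process_host_id`. Below is a small, hypothetical C sketch of that general pattern, with invented names: the handler stores only an id and resolves it through a registry that returns NULL once the object has been unregistered, so a late callback degrades to a no-op rather than a use-after-free.

```c
#include <stdio.h>
#include <stddef.h>

#define MAX_HOSTS 8

struct host {
    int id;
    const char *name;
};

/* A tiny registry standing in for "look the process host up by id". */
static struct host *g_hosts[MAX_HOSTS];

static void register_host(struct host *h)   { g_hosts[h->id] = h; }
static void unregister_host(int id)         { g_hosts[id] = NULL; }

static struct host *lookup_host(int id)
{
    return (id >= 0 && id < MAX_HOSTS) ? g_hosts[id] : NULL;
}

/* The handler remembers only the id, never the pointer. */
struct handler {
    int host_id;
};

static void handler_use(struct handler *hd)
{
    struct host *h = lookup_host(hd->host_id);
    if (!h) {
        printf("host %d is gone, skipping work\n", hd->host_id);
        return;
    }
    printf("using host %d (%s)\n", h->id, h->name);
}

int main(void)
{
    struct host renderer = { .id = 3, .name = "renderer" };
    struct handler hd = { .host_id = 3 };

    register_host(&renderer);
    handler_use(&hd);          /* resolves successfully */

    unregister_host(3);        /* e.g. the process went away */
    handler_use(&hd);          /* safe: lookup returns NULL, no UAF */
    return 0;
}
```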