instruction | input | output | __index_level_0__
---|---|---|---|
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool WebContentsImpl::ShowingInterstitialPage() const {
return GetRenderManager()->interstitial_page() != NULL;
}
Commit Message: Don't show current RenderWidgetHostView while interstitial is showing.
Also moves interstitial page tracking from RenderFrameHostManager to
WebContents, since interstitial pages are not frame-specific. This was
necessary for subframes to detect if an interstitial page is showing.
BUG=729105
TEST=See comment 13 of bug for repro steps
CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.linux:linux_site_isolation
Review-Url: https://codereview.chromium.org/2938313002
Cr-Commit-Position: refs/heads/master@{#480117}
CWE ID: CWE-20 | bool WebContentsImpl::ShowingInterstitialPage() const {
return interstitial_page_ != nullptr;
}
| 172,334 |
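The fix in this row is a one-line hoist: interstitial tracking moves from RenderFrameHostManager into WebContents itself, so subframes can see it. A minimal C sketch of that pattern (the real code is Chromium C++; every name below is illustrative, not the actual API):

```c
/* Illustrative only -- state that is not frame-specific is hoisted from a
 * per-frame manager into the owning object, so any frame can query it
 * through a single member, as the fixed function above does. */
#include <stddef.h>

struct interstitial_page;                         /* opaque; owned elsewhere */

struct web_contents {
    struct interstitial_page *interstitial_page;  /* NULL when none showing */
};

static int showing_interstitial_page(const struct web_contents *wc)
{
    /* Query the hoisted member instead of reaching into a frame manager. */
    return wc->interstitial_page != NULL;
}
```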
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static inline int do_exception(struct pt_regs *regs, int access,
unsigned long trans_exc_code)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned long address;
unsigned int flags;
int fault;
if (notify_page_fault(regs))
return 0;
tsk = current;
mm = tsk->mm;
/*
* Verify that the fault happened in user space, that
* we are not in an interrupt and that there is a
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
flags = FAULT_FLAG_ALLOW_RETRY;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE;
retry:
down_read(&mm->mmap_sem);
fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
goto out_up;
if (unlikely(vma->vm_start > address)) {
if (!(vma->vm_flags & VM_GROWSDOWN))
goto out_up;
if (expand_stack(vma, address))
goto out_up;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
fault = VM_FAULT_BADACCESS;
if (unlikely(!(vma->vm_flags & access)))
goto out_up;
if (is_vm_hugetlb_page(vma))
address &= HPAGE_MASK;
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
/*
* Major/minor page fault accounting is only done on the
* initial attempt. If we go through a retry, it is extremely
* likely that the page will be found in page cache at that point.
*/
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
/*
* The instruction that caused the program check will
* be repeated. Don't signal single step via SIGTRAP.
*/
clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
fault = 0;
out_up:
up_read(&mm->mmap_sem);
out:
return fault;
}
Commit Message: perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Michael Cree <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Deng-Cheng Zhu <[email protected]>
Cc: Anton Blanchard <[email protected]>
Cc: Eric B Munson <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jason Wessel <[email protected]>
Cc: Don Zickus <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
CWE ID: CWE-399 | static inline int do_exception(struct pt_regs *regs, int access,
unsigned long trans_exc_code)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned long address;
unsigned int flags;
int fault;
if (notify_page_fault(regs))
return 0;
tsk = current;
mm = tsk->mm;
/*
* Verify that the fault happened in user space, that
* we are not in an interrupt and that there is a
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
flags = FAULT_FLAG_ALLOW_RETRY;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE;
retry:
down_read(&mm->mmap_sem);
fault = VM_FAULT_BADMAP;
vma = find_vma(mm, address);
if (!vma)
goto out_up;
if (unlikely(vma->vm_start > address)) {
if (!(vma->vm_flags & VM_GROWSDOWN))
goto out_up;
if (expand_stack(vma, address))
goto out_up;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
fault = VM_FAULT_BADACCESS;
if (unlikely(!(vma->vm_flags & access)))
goto out_up;
if (is_vm_hugetlb_page(vma))
address &= HPAGE_MASK;
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
/*
* Major/minor page fault accounting is only done on the
* initial attempt. If we go through a retry, it is extremely
* likely that the page will be found in page cache at that point.
*/
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
/*
* The instruction that caused the program check will
* be repeated. Don't signal single step via SIGTRAP.
*/
clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
fault = 0;
out_up:
up_read(&mm->mmap_sem);
out:
return fault;
}
| 165,794 |
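The whole fix in this row is an interface change: perf_sw_event() loses its nmi argument. The call-site delta, taken directly from the vulnerable and fixed functions above (a fragment, not a standalone program):

```c
/* before: explicit nmi flag (always 0 here) as the third argument */
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

/* after: the nmi parameter is removed from the swevent interface */
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
```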
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: jbig2_text_region(Jbig2Ctx *ctx, Jbig2Segment *segment, const byte *segment_data)
{
int offset = 0;
Jbig2RegionSegmentInfo region_info;
Jbig2TextRegionParams params;
Jbig2Image *image = NULL;
Jbig2SymbolDict **dicts = NULL;
int n_dicts = 0;
uint16_t flags = 0;
uint16_t huffman_flags = 0;
Jbig2ArithCx *GR_stats = NULL;
int code = 0;
Jbig2WordStream *ws = NULL;
Jbig2ArithState *as = NULL;
int table_index = 0;
const Jbig2HuffmanParams *huffman_params = NULL;
/* 7.4.1 */
if (segment->data_length < 17)
goto too_short;
jbig2_get_region_segment_info(®ion_info, segment_data);
offset += 17;
/* 7.4.3.1.1 */
flags = jbig2_get_uint16(segment_data + offset);
offset += 2;
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "text region header flags 0x%04x", flags);
/* zero params to ease cleanup later */
memset(¶ms, 0, sizeof(Jbig2TextRegionParams));
params.SBHUFF = flags & 0x0001;
params.SBREFINE = flags & 0x0002;
params.LOGSBSTRIPS = (flags & 0x000c) >> 2;
params.SBSTRIPS = 1 << params.LOGSBSTRIPS;
params.REFCORNER = (Jbig2RefCorner)((flags & 0x0030) >> 4);
params.TRANSPOSED = flags & 0x0040;
params.SBCOMBOP = (Jbig2ComposeOp)((flags & 0x0180) >> 7);
params.SBDEFPIXEL = flags & 0x0200;
/* SBDSOFFSET is a signed 5 bit integer */
params.SBDSOFFSET = (flags & 0x7C00) >> 10;
if (params.SBDSOFFSET > 0x0f)
params.SBDSOFFSET -= 0x20;
params.SBRTEMPLATE = flags & 0x8000;
if (params.SBDSOFFSET) {
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "text region has SBDSOFFSET %d", params.SBDSOFFSET);
}
if (params.SBHUFF) { /* Huffman coding */
/* 7.4.3.1.2 */
huffman_flags = jbig2_get_uint16(segment_data + offset);
offset += 2;
if (huffman_flags & 0x8000)
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "reserved bit 15 of text region huffman flags is not zero");
} else { /* arithmetic coding */
/* 7.4.3.1.3 */
if ((params.SBREFINE) && !(params.SBRTEMPLATE)) {
params.sbrat[0] = segment_data[offset];
params.sbrat[1] = segment_data[offset + 1];
params.sbrat[2] = segment_data[offset + 2];
params.sbrat[3] = segment_data[offset + 3];
offset += 4;
}
}
/* 7.4.3.1.4 */
params.SBNUMINSTANCES = jbig2_get_uint32(segment_data + offset);
offset += 4;
if (params.SBHUFF) {
/* 7.4.3.1.5 - Symbol ID Huffman table */
/* ...this is handled in the segment body decoder */
/* 7.4.3.1.6 - Other Huffman table selection */
switch (huffman_flags & 0x0003) {
case 0: /* Table B.6 */
params.SBHUFFFS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_F);
break;
case 1: /* Table B.7 */
params.SBHUFFFS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_G);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom FS huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFFS = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
case 2: /* invalid */
default:
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region specified invalid FS huffman table");
goto cleanup1;
break;
}
if (params.SBHUFFFS == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified FS huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x000c) >> 2) {
case 0: /* Table B.8 */
params.SBHUFFDS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_H);
break;
case 1: /* Table B.9 */
params.SBHUFFDS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_I);
break;
case 2: /* Table B.10 */
params.SBHUFFDS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_J);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom DS huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFDS = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
}
if (params.SBHUFFDS == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified DS huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x0030) >> 4) {
case 0: /* Table B.11 */
params.SBHUFFDT = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_K);
break;
case 1: /* Table B.12 */
params.SBHUFFDT = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_L);
break;
case 2: /* Table B.13 */
params.SBHUFFDT = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_M);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom DT huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFDT = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
}
if (params.SBHUFFDT == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified DT huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x00c0) >> 6) {
case 0: /* Table B.14 */
params.SBHUFFRDW = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_N);
break;
case 1: /* Table B.15 */
params.SBHUFFRDW = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom RDW huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFRDW = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
case 2: /* invalid */
default:
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region specified invalid RDW huffman table");
goto cleanup1;
break;
}
if (params.SBHUFFRDW == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified RDW huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x0300) >> 8) {
case 0: /* Table B.14 */
params.SBHUFFRDH = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_N);
break;
case 1: /* Table B.15 */
params.SBHUFFRDH = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom RDH huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFRDH = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
case 2: /* invalid */
default:
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region specified invalid RDH huffman table");
goto cleanup1;
break;
}
if (params.SBHUFFRDH == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified RDH huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x0c00) >> 10) {
case 0: /* Table B.14 */
params.SBHUFFRDX = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_N);
break;
case 1: /* Table B.15 */
params.SBHUFFRDX = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom RDX huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFRDX = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
case 2: /* invalid */
default:
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region specified invalid RDX huffman table");
goto cleanup1;
break;
}
if (params.SBHUFFRDX == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified RDX huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x3000) >> 12) {
case 0: /* Table B.14 */
params.SBHUFFRDY = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_N);
break;
case 1: /* Table B.15 */
params.SBHUFFRDY = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom RDY huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFRDY = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
case 2: /* invalid */
default:
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region specified invalid RDY huffman table");
goto cleanup1;
break;
}
if (params.SBHUFFRDY == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified RDY huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x4000) >> 14) {
case 0: /* Table B.1 */
params.SBHUFFRSIZE = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_A);
break;
case 1: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom RSIZE huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFRSIZE = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
}
if (params.SBHUFFRSIZE == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified RSIZE huffman table");
goto cleanup1;
}
if (huffman_flags & 0x8000) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "text region huffman flags bit 15 is set, contrary to spec");
}
/* 7.4.3.1.7 */
/* For convenience this is done in the body decoder routine */
}
jbig2_error(ctx, JBIG2_SEVERITY_INFO, segment->number,
"text region: %d x %d @ (%d,%d) %d symbols", region_info.width, region_info.height, region_info.x, region_info.y, params.SBNUMINSTANCES);
/* 7.4.3.2 (2) - compose the list of symbol dictionaries */
n_dicts = jbig2_sd_count_referred(ctx, segment);
if (n_dicts != 0) {
dicts = jbig2_sd_list_referred(ctx, segment);
} else {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region refers to no symbol dictionaries!");
goto cleanup1;
}
if (dicts == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "unable to retrive symbol dictionaries! previous parsing error?");
goto cleanup1;
} else {
int index;
if (dicts[0] == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "unable to find first referenced symbol dictionary!");
goto cleanup1;
}
for (index = 1; index < n_dicts; index++)
if (dicts[index] == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "unable to find all referenced symbol dictionaries!");
n_dicts = index;
}
}
/* 7.4.3.2 (3) */
{
int stats_size = params.SBRTEMPLATE ? 1 << 10 : 1 << 13;
GR_stats = jbig2_new(ctx, Jbig2ArithCx, stats_size);
if (GR_stats == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "could not allocate GR_stats");
goto cleanup1;
}
memset(GR_stats, 0, stats_size);
}
image = jbig2_image_new(ctx, region_info.width, region_info.height);
if (image == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "couldn't allocate text region image");
goto cleanup2;
}
ws = jbig2_word_stream_buf_new(ctx, segment_data + offset, segment->data_length - offset);
if (ws == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "couldn't allocate ws in text region image");
goto cleanup2;
}
as = jbig2_arith_new(ctx, ws);
if (as == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "couldn't allocate as in text region image");
goto cleanup2;
}
if (!params.SBHUFF) {
int SBSYMCODELEN, index;
int SBNUMSYMS = 0;
for (index = 0; index < n_dicts; index++) {
SBNUMSYMS += dicts[index]->n_symbols;
}
params.IADT = jbig2_arith_int_ctx_new(ctx);
params.IAFS = jbig2_arith_int_ctx_new(ctx);
params.IADS = jbig2_arith_int_ctx_new(ctx);
params.IAIT = jbig2_arith_int_ctx_new(ctx);
if ((params.IADT == NULL) || (params.IAFS == NULL) || (params.IADS == NULL) || (params.IAIT == NULL)) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "couldn't allocate text region image data");
goto cleanup3;
}
/* Table 31 */
for (SBSYMCODELEN = 0; (1 << SBSYMCODELEN) < SBNUMSYMS; SBSYMCODELEN++) {
}
params.IAID = jbig2_arith_iaid_ctx_new(ctx, SBSYMCODELEN);
params.IARI = jbig2_arith_int_ctx_new(ctx);
params.IARDW = jbig2_arith_int_ctx_new(ctx);
params.IARDH = jbig2_arith_int_ctx_new(ctx);
params.IARDX = jbig2_arith_int_ctx_new(ctx);
params.IARDY = jbig2_arith_int_ctx_new(ctx);
if ((params.IAID == NULL) || (params.IARI == NULL) ||
(params.IARDW == NULL) || (params.IARDH == NULL) || (params.IARDX == NULL) || (params.IARDY == NULL)) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "couldn't allocate text region image data");
goto cleanup4;
}
}
code = jbig2_decode_text_region(ctx, segment, ¶ms,
(const Jbig2SymbolDict * const *)dicts, n_dicts, image,
segment_data + offset, segment->data_length - offset, GR_stats, as, ws);
if (code < 0) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to decode text region image data");
goto cleanup4;
}
if ((segment->flags & 63) == 4) {
/* we have an intermediate region here. save it for later */
segment->result = jbig2_image_clone(ctx, image);
} else {
/* otherwise composite onto the page */
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number,
"composing %dx%d decoded text region onto page at (%d, %d)", region_info.width, region_info.height, region_info.x, region_info.y);
jbig2_page_add_result(ctx, &ctx->pages[ctx->current_page], image, region_info.x, region_info.y, region_info.op);
}
cleanup4:
if (!params.SBHUFF) {
jbig2_arith_iaid_ctx_free(ctx, params.IAID);
jbig2_arith_int_ctx_free(ctx, params.IARI);
jbig2_arith_int_ctx_free(ctx, params.IARDW);
jbig2_arith_int_ctx_free(ctx, params.IARDH);
jbig2_arith_int_ctx_free(ctx, params.IARDX);
jbig2_arith_int_ctx_free(ctx, params.IARDY);
}
cleanup3:
if (!params.SBHUFF) {
jbig2_arith_int_ctx_free(ctx, params.IADT);
jbig2_arith_int_ctx_free(ctx, params.IAFS);
jbig2_arith_int_ctx_free(ctx, params.IADS);
jbig2_arith_int_ctx_free(ctx, params.IAIT);
}
jbig2_free(ctx->allocator, as);
jbig2_word_stream_buf_free(ctx, ws);
cleanup2:
jbig2_free(ctx->allocator, GR_stats);
jbig2_image_release(ctx, image);
cleanup1:
if (params.SBHUFF) {
jbig2_release_huffman_table(ctx, params.SBHUFFFS);
jbig2_release_huffman_table(ctx, params.SBHUFFDS);
jbig2_release_huffman_table(ctx, params.SBHUFFDT);
jbig2_release_huffman_table(ctx, params.SBHUFFRDX);
jbig2_release_huffman_table(ctx, params.SBHUFFRDY);
jbig2_release_huffman_table(ctx, params.SBHUFFRDW);
jbig2_release_huffman_table(ctx, params.SBHUFFRDH);
jbig2_release_huffman_table(ctx, params.SBHUFFRSIZE);
}
jbig2_free(ctx->allocator, dicts);
return code;
too_short:
return jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Segment too short");
}
Commit Message:
CWE ID: CWE-119 | jbig2_text_region(Jbig2Ctx *ctx, Jbig2Segment *segment, const byte *segment_data)
{
uint32_t offset = 0;
Jbig2RegionSegmentInfo region_info;
Jbig2TextRegionParams params;
Jbig2Image *image = NULL;
Jbig2SymbolDict **dicts = NULL;
uint32_t n_dicts = 0;
uint16_t flags = 0;
uint16_t huffman_flags = 0;
Jbig2ArithCx *GR_stats = NULL;
int code = 0;
Jbig2WordStream *ws = NULL;
Jbig2ArithState *as = NULL;
uint32_t table_index = 0;
const Jbig2HuffmanParams *huffman_params = NULL;
/* 7.4.1 */
if (segment->data_length < 17)
goto too_short;
jbig2_get_region_segment_info(®ion_info, segment_data);
offset += 17;
/* 7.4.3.1.1 */
flags = jbig2_get_uint16(segment_data + offset);
offset += 2;
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "text region header flags 0x%04x", flags);
/* zero params to ease cleanup later */
memset(¶ms, 0, sizeof(Jbig2TextRegionParams));
params.SBHUFF = flags & 0x0001;
params.SBREFINE = flags & 0x0002;
params.LOGSBSTRIPS = (flags & 0x000c) >> 2;
params.SBSTRIPS = 1 << params.LOGSBSTRIPS;
params.REFCORNER = (Jbig2RefCorner)((flags & 0x0030) >> 4);
params.TRANSPOSED = flags & 0x0040;
params.SBCOMBOP = (Jbig2ComposeOp)((flags & 0x0180) >> 7);
params.SBDEFPIXEL = flags & 0x0200;
/* SBDSOFFSET is a signed 5 bit integer */
params.SBDSOFFSET = (flags & 0x7C00) >> 10;
if (params.SBDSOFFSET > 0x0f)
params.SBDSOFFSET -= 0x20;
params.SBRTEMPLATE = flags & 0x8000;
if (params.SBDSOFFSET) {
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "text region has SBDSOFFSET %d", params.SBDSOFFSET);
}
if (params.SBHUFF) { /* Huffman coding */
/* 7.4.3.1.2 */
huffman_flags = jbig2_get_uint16(segment_data + offset);
offset += 2;
if (huffman_flags & 0x8000)
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "reserved bit 15 of text region huffman flags is not zero");
} else { /* arithmetic coding */
/* 7.4.3.1.3 */
if ((params.SBREFINE) && !(params.SBRTEMPLATE)) {
params.sbrat[0] = segment_data[offset];
params.sbrat[1] = segment_data[offset + 1];
params.sbrat[2] = segment_data[offset + 2];
params.sbrat[3] = segment_data[offset + 3];
offset += 4;
}
}
/* 7.4.3.1.4 */
params.SBNUMINSTANCES = jbig2_get_uint32(segment_data + offset);
offset += 4;
if (params.SBHUFF) {
/* 7.4.3.1.5 - Symbol ID Huffman table */
/* ...this is handled in the segment body decoder */
/* 7.4.3.1.6 - Other Huffman table selection */
switch (huffman_flags & 0x0003) {
case 0: /* Table B.6 */
params.SBHUFFFS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_F);
break;
case 1: /* Table B.7 */
params.SBHUFFFS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_G);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom FS huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFFS = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
case 2: /* invalid */
default:
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region specified invalid FS huffman table");
goto cleanup1;
break;
}
if (params.SBHUFFFS == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified FS huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x000c) >> 2) {
case 0: /* Table B.8 */
params.SBHUFFDS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_H);
break;
case 1: /* Table B.9 */
params.SBHUFFDS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_I);
break;
case 2: /* Table B.10 */
params.SBHUFFDS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_J);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom DS huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFDS = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
}
if (params.SBHUFFDS == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified DS huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x0030) >> 4) {
case 0: /* Table B.11 */
params.SBHUFFDT = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_K);
break;
case 1: /* Table B.12 */
params.SBHUFFDT = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_L);
break;
case 2: /* Table B.13 */
params.SBHUFFDT = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_M);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom DT huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFDT = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
}
if (params.SBHUFFDT == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified DT huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x00c0) >> 6) {
case 0: /* Table B.14 */
params.SBHUFFRDW = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_N);
break;
case 1: /* Table B.15 */
params.SBHUFFRDW = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom RDW huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFRDW = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
case 2: /* invalid */
default:
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region specified invalid RDW huffman table");
goto cleanup1;
break;
}
if (params.SBHUFFRDW == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified RDW huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x0300) >> 8) {
case 0: /* Table B.14 */
params.SBHUFFRDH = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_N);
break;
case 1: /* Table B.15 */
params.SBHUFFRDH = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom RDH huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFRDH = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
case 2: /* invalid */
default:
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region specified invalid RDH huffman table");
goto cleanup1;
break;
}
if (params.SBHUFFRDH == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified RDH huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x0c00) >> 10) {
case 0: /* Table B.14 */
params.SBHUFFRDX = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_N);
break;
case 1: /* Table B.15 */
params.SBHUFFRDX = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom RDX huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFRDX = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
case 2: /* invalid */
default:
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region specified invalid RDX huffman table");
goto cleanup1;
break;
}
if (params.SBHUFFRDX == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified RDX huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x3000) >> 12) {
case 0: /* Table B.14 */
params.SBHUFFRDY = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_N);
break;
case 1: /* Table B.15 */
params.SBHUFFRDY = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O);
break;
case 3: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom RDY huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFRDY = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
case 2: /* invalid */
default:
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region specified invalid RDY huffman table");
goto cleanup1;
break;
}
if (params.SBHUFFRDY == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified RDY huffman table");
goto cleanup1;
}
switch ((huffman_flags & 0x4000) >> 14) {
case 0: /* Table B.1 */
params.SBHUFFRSIZE = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_A);
break;
case 1: /* Custom table from referred segment */
huffman_params = jbig2_find_table(ctx, segment, table_index);
if (huffman_params == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Custom RSIZE huffman table not found (%d)", table_index);
goto cleanup1;
}
params.SBHUFFRSIZE = jbig2_build_huffman_table(ctx, huffman_params);
++table_index;
break;
}
if (params.SBHUFFRSIZE == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate text region specified RSIZE huffman table");
goto cleanup1;
}
if (huffman_flags & 0x8000) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "text region huffman flags bit 15 is set, contrary to spec");
}
/* 7.4.3.1.7 */
/* For convenience this is done in the body decoder routine */
}
jbig2_error(ctx, JBIG2_SEVERITY_INFO, segment->number,
"text region: %d x %d @ (%d,%d) %d symbols", region_info.width, region_info.height, region_info.x, region_info.y, params.SBNUMINSTANCES);
/* 7.4.3.2 (2) - compose the list of symbol dictionaries */
n_dicts = jbig2_sd_count_referred(ctx, segment);
if (n_dicts != 0) {
dicts = jbig2_sd_list_referred(ctx, segment);
} else {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "text region refers to no symbol dictionaries!");
goto cleanup1;
}
if (dicts == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "unable to retrive symbol dictionaries! previous parsing error?");
goto cleanup1;
} else {
uint32_t index;
if (dicts[0] == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "unable to find first referenced symbol dictionary!");
goto cleanup1;
}
for (index = 1; index < n_dicts; index++)
if (dicts[index] == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "unable to find all referenced symbol dictionaries!");
n_dicts = index;
}
}
/* 7.4.3.2 (3) */
{
int stats_size = params.SBRTEMPLATE ? 1 << 10 : 1 << 13;
GR_stats = jbig2_new(ctx, Jbig2ArithCx, stats_size);
if (GR_stats == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "could not allocate GR_stats");
goto cleanup1;
}
memset(GR_stats, 0, stats_size);
}
image = jbig2_image_new(ctx, region_info.width, region_info.height);
if (image == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "couldn't allocate text region image");
goto cleanup2;
}
ws = jbig2_word_stream_buf_new(ctx, segment_data + offset, segment->data_length - offset);
if (ws == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "couldn't allocate ws in text region image");
goto cleanup2;
}
as = jbig2_arith_new(ctx, ws);
if (as == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "couldn't allocate as in text region image");
goto cleanup2;
}
if (!params.SBHUFF) {
uint32_t SBSYMCODELEN, index;
uint32_t SBNUMSYMS = 0;
for (index = 0; index < n_dicts; index++) {
SBNUMSYMS += dicts[index]->n_symbols;
}
params.IADT = jbig2_arith_int_ctx_new(ctx);
params.IAFS = jbig2_arith_int_ctx_new(ctx);
params.IADS = jbig2_arith_int_ctx_new(ctx);
params.IAIT = jbig2_arith_int_ctx_new(ctx);
if ((params.IADT == NULL) || (params.IAFS == NULL) || (params.IADS == NULL) || (params.IAIT == NULL)) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "couldn't allocate text region image data");
goto cleanup3;
}
/* Table 31 */
for (SBSYMCODELEN = 0; (1U << SBSYMCODELEN) < SBNUMSYMS; SBSYMCODELEN++) {
}
params.IAID = jbig2_arith_iaid_ctx_new(ctx, SBSYMCODELEN);
params.IARI = jbig2_arith_int_ctx_new(ctx);
params.IARDW = jbig2_arith_int_ctx_new(ctx);
params.IARDH = jbig2_arith_int_ctx_new(ctx);
params.IARDX = jbig2_arith_int_ctx_new(ctx);
params.IARDY = jbig2_arith_int_ctx_new(ctx);
if ((params.IAID == NULL) || (params.IARI == NULL) ||
(params.IARDW == NULL) || (params.IARDH == NULL) || (params.IARDX == NULL) || (params.IARDY == NULL)) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "couldn't allocate text region image data");
goto cleanup4;
}
}
code = jbig2_decode_text_region(ctx, segment, ¶ms,
(const Jbig2SymbolDict * const *)dicts, n_dicts, image,
segment_data + offset, segment->data_length - offset, GR_stats, as, ws);
if (code < 0) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to decode text region image data");
goto cleanup4;
}
if ((segment->flags & 63) == 4) {
/* we have an intermediate region here. save it for later */
segment->result = jbig2_image_clone(ctx, image);
} else {
/* otherwise composite onto the page */
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number,
"composing %dx%d decoded text region onto page at (%d, %d)", region_info.width, region_info.height, region_info.x, region_info.y);
jbig2_page_add_result(ctx, &ctx->pages[ctx->current_page], image, region_info.x, region_info.y, region_info.op);
}
cleanup4:
if (!params.SBHUFF) {
jbig2_arith_iaid_ctx_free(ctx, params.IAID);
jbig2_arith_int_ctx_free(ctx, params.IARI);
jbig2_arith_int_ctx_free(ctx, params.IARDW);
jbig2_arith_int_ctx_free(ctx, params.IARDH);
jbig2_arith_int_ctx_free(ctx, params.IARDX);
jbig2_arith_int_ctx_free(ctx, params.IARDY);
}
cleanup3:
if (!params.SBHUFF) {
jbig2_arith_int_ctx_free(ctx, params.IADT);
jbig2_arith_int_ctx_free(ctx, params.IAFS);
jbig2_arith_int_ctx_free(ctx, params.IADS);
jbig2_arith_int_ctx_free(ctx, params.IAIT);
}
jbig2_free(ctx->allocator, as);
jbig2_word_stream_buf_free(ctx, ws);
cleanup2:
jbig2_free(ctx->allocator, GR_stats);
jbig2_image_release(ctx, image);
cleanup1:
if (params.SBHUFF) {
jbig2_release_huffman_table(ctx, params.SBHUFFFS);
jbig2_release_huffman_table(ctx, params.SBHUFFDS);
jbig2_release_huffman_table(ctx, params.SBHUFFDT);
jbig2_release_huffman_table(ctx, params.SBHUFFRDX);
jbig2_release_huffman_table(ctx, params.SBHUFFRDY);
jbig2_release_huffman_table(ctx, params.SBHUFFRDW);
jbig2_release_huffman_table(ctx, params.SBHUFFRDH);
jbig2_release_huffman_table(ctx, params.SBHUFFRSIZE);
}
jbig2_free(ctx->allocator, dicts);
return code;
too_short:
return jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Segment too short");
}
| 165,505 |
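The diff in this row is almost entirely signedness: offset, n_dicts, table_index, the loop indices, and SBNUMSYMS move from int to uint32_t, and the Table 31 loop shifts 1U instead of 1. A small runnable C sketch of the symbol-code-length computation in its fixed, unsigned form (the count value is made up for illustration):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Total symbols summed from the referred dictionaries; in the decoder
     * this is attacker-influenced file data, hence the move to uint32_t. */
    uint32_t SBNUMSYMS = 70000u;          /* illustrative value */
    uint32_t SBSYMCODELEN;

    /* Fixed shape from the row above (Table 31): unsigned counter and an
     * unsigned shift. With int and a plain "1 << SBSYMCODELEN", a large
     * enough count would push the shift into the sign bit -- signed
     * overflow, which is undefined behavior in C. */
    for (SBSYMCODELEN = 0; (1U << SBSYMCODELEN) < SBNUMSYMS; SBSYMCODELEN++)
        ;

    printf("SBSYMCODELEN = %u\n", SBSYMCODELEN);   /* prints 17 */
    return 0;
}
```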
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: OMX_ERRORTYPE SimpleSoftOMXComponent::internalGetParameter(
OMX_INDEXTYPE index, OMX_PTR params) {
switch (index) {
case OMX_IndexParamPortDefinition:
{
OMX_PARAM_PORTDEFINITIONTYPE *defParams =
(OMX_PARAM_PORTDEFINITIONTYPE *)params;
if (defParams->nPortIndex >= mPorts.size()
|| defParams->nSize
!= sizeof(OMX_PARAM_PORTDEFINITIONTYPE)) {
return OMX_ErrorUndefined;
}
const PortInfo *port =
&mPorts.itemAt(defParams->nPortIndex);
memcpy(defParams, &port->mDef, sizeof(port->mDef));
return OMX_ErrorNone;
}
default:
return OMX_ErrorUnsupportedIndex;
}
}
Commit Message: DO NOT MERGE Verify OMX buffer sizes prior to access
Bug: 27207275
Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d
CWE ID: CWE-119 | OMX_ERRORTYPE SimpleSoftOMXComponent::internalGetParameter(
OMX_INDEXTYPE index, OMX_PTR params) {
switch (index) {
case OMX_IndexParamPortDefinition:
{
OMX_PARAM_PORTDEFINITIONTYPE *defParams =
(OMX_PARAM_PORTDEFINITIONTYPE *)params;
if (!isValidOMXParam(defParams)) {
return OMX_ErrorBadParameter;
}
if (defParams->nPortIndex >= mPorts.size()
|| defParams->nSize
!= sizeof(OMX_PARAM_PORTDEFINITIONTYPE)) {
return OMX_ErrorUndefined;
}
const PortInfo *port =
&mPorts.itemAt(defParams->nPortIndex);
memcpy(defParams, &port->mDef, sizeof(port->mDef));
return OMX_ErrorNone;
}
default:
return OMX_ErrorUnsupportedIndex;
}
}
| 174,222 |
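The added guard is the entire fix here: isValidOMXParam(defParams) must pass before the struct is dereferenced and memcpy'd. In the Android patch that helper is a C++ template; the C-flavored sketch below only shows the property it has to enforce -- the caller-declared nSize must cover the struct the component is about to read and write. Struct and macro names are illustrative, not the actual helper:

```c
#include <stddef.h>

/* OMX parameter structs begin with a size/version header filled in by the
 * caller; a malicious caller can declare a buffer smaller than the struct
 * the component will access. */
struct omx_param_header {
    unsigned int nSize;      /* caller-declared size of the whole struct */
    unsigned int nVersion;
};

/* Hypothetical checker: accept the block only if its declared size covers
 * the full type, so the later memcpy cannot overrun the caller's buffer. */
#define IS_VALID_OMX_PARAM(ptr, type) \
    (((const struct omx_param_header *)(ptr))->nSize >= sizeof(type))
```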
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static inline void sem_putref(struct sem_array *sma)
{
ipc_lock_by_ptr(&sma->sem_perm);
ipc_rcu_putref(sma);
ipc_unlock(&(sma)->sem_perm);
}
Commit Message: ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores, look like this:
            vanilla   Davidlohr's   Davidlohr's +   Davidlohr's +
threads               patches       rwlock patches  v3 patches
 10          610652        726325         1783589         2142206
 20          341570        365699         1520453         1977878
 30          288102        307037         1498167         2037995
 40          290714        305955         1612665         2256484
 50          288620        312890         1733453         2650292
 60          289987        306043         1649360         2388008
 70          291298        306347         1723167         2717486
 80          290948        305662         1729545         2763582
 90          290996        306680         1736021         2757524
100          292243        306700         1773700         3059159
[[email protected]: do not call sem_lock when bogus sma]
[[email protected]: make refcounter atomic]
Signed-off-by: Rik van Riel <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Chegu Vinod <[email protected]>
Cc: Jason Low <[email protected]>
Reviewed-by: Michel Lespinasse <[email protected]>
Cc: Peter Hurley <[email protected]>
Cc: Stanislav Kinsbursky <[email protected]>
Tested-by: Emmanuel Benisty <[email protected]>
Tested-by: Sedat Dilek <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-189 | static inline void sem_putref(struct sem_array *sma)
{
sem_lock_and_putref(sma);
sem_unlock(sma, -1);
}
| 165,978 |
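The fixed sem_putref() above leans on two helpers introduced by this commit, sem_lock_and_putref() and sem_unlock(sma, -1), where -1 means "the whole array". A hedged userland sketch of the locking policy the commit message describes -- single-semaphore operations take only that semaphore's lock, everything else takes the array-wide lock (the real kernel code uses spinlocks and is subtler, since multi-sop holders must also exclude the per-semaphore fast path):

```c
#include <pthread.h>

struct sem_like       { pthread_mutex_t lock; };
struct sem_array_like {
    pthread_mutex_t  global_lock;   /* taken for multi-sop ops and teardown */
    struct sem_like *sems;
};

/* semnum >= 0: fast path, lock just that semaphore.
 * semnum == -1: complex path, lock the whole array. */
static void sem_lock_like(struct sem_array_like *sma, int semnum)
{
    if (semnum >= 0)
        pthread_mutex_lock(&sma->sems[semnum].lock);
    else
        pthread_mutex_lock(&sma->global_lock);
}

static void sem_unlock_like(struct sem_array_like *sma, int semnum)
{
    if (semnum >= 0)
        pthread_mutex_unlock(&sma->sems[semnum].lock);
    else
        pthread_mutex_unlock(&sma->global_lock);
}
```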
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
struct vm_area_struct *dst_vma,
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
bool zeropage)
{
int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
int vm_shared = dst_vma->vm_flags & VM_SHARED;
ssize_t err;
pte_t *dst_pte;
unsigned long src_addr, dst_addr;
long copied;
struct page *page;
struct hstate *h;
unsigned long vma_hpagesize;
pgoff_t idx;
u32 hash;
struct address_space *mapping;
/*
* There is no default zero huge page for all huge page sizes as
* supported by hugetlb. A PMD_SIZE huge pages may exist as used
* by THP. Since we can not reliably insert a zero page, this
* feature is not supported.
*/
if (zeropage) {
up_read(&dst_mm->mmap_sem);
return -EINVAL;
}
src_addr = src_start;
dst_addr = dst_start;
copied = 0;
page = NULL;
vma_hpagesize = vma_kernel_pagesize(dst_vma);
/*
* Validate alignment based on huge page size
*/
err = -EINVAL;
if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
goto out_unlock;
retry:
/*
* On routine entry dst_vma is set. If we had to drop mmap_sem and
* retry, dst_vma will be set to NULL and we must lookup again.
*/
if (!dst_vma) {
err = -ENOENT;
dst_vma = find_vma(dst_mm, dst_start);
if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
goto out_unlock;
/*
* Only allow __mcopy_atomic_hugetlb on userfaultfd
* registered ranges.
*/
if (!dst_vma->vm_userfaultfd_ctx.ctx)
goto out_unlock;
if (dst_start < dst_vma->vm_start ||
dst_start + len > dst_vma->vm_end)
goto out_unlock;
err = -EINVAL;
if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
goto out_unlock;
vm_shared = dst_vma->vm_flags & VM_SHARED;
}
if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
(len - copied) & (vma_hpagesize - 1)))
goto out_unlock;
/*
* If not shared, ensure the dst_vma has a anon_vma.
*/
err = -ENOMEM;
if (!vm_shared) {
if (unlikely(anon_vma_prepare(dst_vma)))
goto out_unlock;
}
h = hstate_vma(dst_vma);
while (src_addr < src_start + len) {
pte_t dst_pteval;
BUG_ON(dst_addr >= dst_start + len);
VM_BUG_ON(dst_addr & ~huge_page_mask(h));
/*
* Serialize via hugetlb_fault_mutex
*/
idx = linear_page_index(dst_vma, dst_addr);
mapping = dst_vma->vm_file->f_mapping;
hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
idx, dst_addr);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
err = -ENOMEM;
dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
if (!dst_pte) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
goto out_unlock;
}
err = -EEXIST;
dst_pteval = huge_ptep_get(dst_pte);
if (!huge_pte_none(dst_pteval)) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
goto out_unlock;
}
err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
dst_addr, src_addr, &page);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
vm_alloc_shared = vm_shared;
cond_resched();
if (unlikely(err == -ENOENT)) {
up_read(&dst_mm->mmap_sem);
BUG_ON(!page);
err = copy_huge_page_from_user(page,
(const void __user *)src_addr,
pages_per_huge_page(h), true);
if (unlikely(err)) {
err = -EFAULT;
goto out;
}
down_read(&dst_mm->mmap_sem);
dst_vma = NULL;
goto retry;
} else
BUG_ON(page);
if (!err) {
dst_addr += vma_hpagesize;
src_addr += vma_hpagesize;
copied += vma_hpagesize;
if (fatal_signal_pending(current))
err = -EINTR;
}
if (err)
break;
}
out_unlock:
up_read(&dst_mm->mmap_sem);
out:
if (page) {
/*
* We encountered an error and are about to free a newly
* allocated huge page.
*
* Reservation handling is very subtle, and is different for
* private and shared mappings. See the routine
* restore_reserve_on_error for details. Unfortunately, we
* can not call restore_reserve_on_error now as it would
* require holding mmap_sem.
*
* If a reservation for the page existed in the reservation
* map of a private mapping, the map was modified to indicate
* the reservation was consumed when the page was allocated.
* We clear the PagePrivate flag now so that the global
* reserve count will not be incremented in free_huge_page.
* The reservation map will still indicate the reservation
* was consumed and possibly prevent later page allocation.
* This is better than leaking a global reservation. If no
* reservation existed, it is still safe to clear PagePrivate
* as no adjustments to reservation counts were made during
* allocation.
*
* The reservation map for shared mappings indicates which
* pages have reservations. When a huge page is allocated
* for an address with a reservation, no change is made to
* the reserve map. In this case PagePrivate will be set
* to indicate that the global reservation count should be
* incremented when the page is freed. This is the desired
* behavior. However, when a huge page is allocated for an
* address without a reservation a reservation entry is added
* to the reservation map, and PagePrivate will not be set.
* When the page is freed, the global reserve count will NOT
* be incremented and it will appear as though we have leaked
* reserved page. In this case, set PagePrivate so that the
* global reserve count will be incremented to match the
* reservation map entry which was created.
*
* Note that vm_alloc_shared is based on the flags of the vma
* for which the page was originally allocated. dst_vma could
* be different or NULL on error.
*/
if (vm_alloc_shared)
SetPagePrivate(page);
else
ClearPagePrivate(page);
put_page(page);
}
BUG_ON(copied < 0);
BUG_ON(err > 0);
BUG_ON(!copied && !err);
return copied ? copied : err;
}
Commit Message: userfaultfd: shmem/hugetlbfs: only allow to register VM_MAYWRITE vmas
After the VMA to register the uffd onto is found, check that it has
VM_MAYWRITE set before allowing registration. This way we inherit all
common code checks before allowing to fill file holes in shmem and
hugetlbfs with UFFDIO_COPY.
The userfaultfd memory model is not applicable for readonly files unless
it's a MAP_PRIVATE.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: ff62a3421044 ("hugetlb: implement memfd sealing")
Signed-off-by: Andrea Arcangeli <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Reviewed-by: Hugh Dickins <[email protected]>
Reported-by: Jann Horn <[email protected]>
Fixes: 4c27fe4c4c84 ("userfaultfd: shmem: add shmem_mcopy_atomic_pte for userfaultfd support")
Cc: <[email protected]>
Cc: "Dr. David Alan Gilbert" <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: [email protected]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: | static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
struct vm_area_struct *dst_vma,
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
bool zeropage)
{
int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
int vm_shared = dst_vma->vm_flags & VM_SHARED;
ssize_t err;
pte_t *dst_pte;
unsigned long src_addr, dst_addr;
long copied;
struct page *page;
struct hstate *h;
unsigned long vma_hpagesize;
pgoff_t idx;
u32 hash;
struct address_space *mapping;
/*
* There is no default zero huge page for all huge page sizes as
* supported by hugetlb. A PMD_SIZE huge pages may exist as used
* by THP. Since we can not reliably insert a zero page, this
* feature is not supported.
*/
if (zeropage) {
up_read(&dst_mm->mmap_sem);
return -EINVAL;
}
src_addr = src_start;
dst_addr = dst_start;
copied = 0;
page = NULL;
vma_hpagesize = vma_kernel_pagesize(dst_vma);
/*
* Validate alignment based on huge page size
*/
err = -EINVAL;
if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
goto out_unlock;
retry:
/*
* On routine entry dst_vma is set. If we had to drop mmap_sem and
* retry, dst_vma will be set to NULL and we must lookup again.
*/
if (!dst_vma) {
err = -ENOENT;
dst_vma = find_vma(dst_mm, dst_start);
if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
goto out_unlock;
/*
* Check the vma is registered in uffd, this is
* required to enforce the VM_MAYWRITE check done at
* uffd registration time.
*/
if (!dst_vma->vm_userfaultfd_ctx.ctx)
goto out_unlock;
if (dst_start < dst_vma->vm_start ||
dst_start + len > dst_vma->vm_end)
goto out_unlock;
err = -EINVAL;
if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
goto out_unlock;
vm_shared = dst_vma->vm_flags & VM_SHARED;
}
if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
(len - copied) & (vma_hpagesize - 1)))
goto out_unlock;
/*
* If not shared, ensure the dst_vma has a anon_vma.
*/
err = -ENOMEM;
if (!vm_shared) {
if (unlikely(anon_vma_prepare(dst_vma)))
goto out_unlock;
}
h = hstate_vma(dst_vma);
while (src_addr < src_start + len) {
pte_t dst_pteval;
BUG_ON(dst_addr >= dst_start + len);
VM_BUG_ON(dst_addr & ~huge_page_mask(h));
/*
* Serialize via hugetlb_fault_mutex
*/
idx = linear_page_index(dst_vma, dst_addr);
mapping = dst_vma->vm_file->f_mapping;
hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
idx, dst_addr);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
err = -ENOMEM;
dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
if (!dst_pte) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
goto out_unlock;
}
err = -EEXIST;
dst_pteval = huge_ptep_get(dst_pte);
if (!huge_pte_none(dst_pteval)) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
goto out_unlock;
}
err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
dst_addr, src_addr, &page);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
vm_alloc_shared = vm_shared;
cond_resched();
if (unlikely(err == -ENOENT)) {
up_read(&dst_mm->mmap_sem);
BUG_ON(!page);
err = copy_huge_page_from_user(page,
(const void __user *)src_addr,
pages_per_huge_page(h), true);
if (unlikely(err)) {
err = -EFAULT;
goto out;
}
down_read(&dst_mm->mmap_sem);
dst_vma = NULL;
goto retry;
} else
BUG_ON(page);
if (!err) {
dst_addr += vma_hpagesize;
src_addr += vma_hpagesize;
copied += vma_hpagesize;
if (fatal_signal_pending(current))
err = -EINTR;
}
if (err)
break;
}
out_unlock:
up_read(&dst_mm->mmap_sem);
out:
if (page) {
/*
* We encountered an error and are about to free a newly
* allocated huge page.
*
* Reservation handling is very subtle, and is different for
* private and shared mappings. See the routine
* restore_reserve_on_error for details. Unfortunately, we
* can not call restore_reserve_on_error now as it would
* require holding mmap_sem.
*
* If a reservation for the page existed in the reservation
* map of a private mapping, the map was modified to indicate
* the reservation was consumed when the page was allocated.
* We clear the PagePrivate flag now so that the global
* reserve count will not be incremented in free_huge_page.
* The reservation map will still indicate the reservation
* was consumed and possibly prevent later page allocation.
* This is better than leaking a global reservation. If no
* reservation existed, it is still safe to clear PagePrivate
* as no adjustments to reservation counts were made during
* allocation.
*
* The reservation map for shared mappings indicates which
* pages have reservations. When a huge page is allocated
* for an address with a reservation, no change is made to
* the reserve map. In this case PagePrivate will be set
* to indicate that the global reservation count should be
* incremented when the page is freed. This is the desired
* behavior. However, when a huge page is allocated for an
* address without a reservation a reservation entry is added
* to the reservation map, and PagePrivate will not be set.
* When the page is freed, the global reserve count will NOT
* be incremented and it will appear as though we have leaked
* reserved page. In this case, set PagePrivate so that the
* global reserve count will be incremented to match the
* reservation map entry which was created.
*
* Note that vm_alloc_shared is based on the flags of the vma
* for which the page was originally allocated. dst_vma could
* be different or NULL on error.
*/
if (vm_alloc_shared)
SetPagePrivate(page);
else
ClearPagePrivate(page);
put_page(page);
}
BUG_ON(copied < 0);
BUG_ON(err > 0);
BUG_ON(!copied && !err);
return copied ? copied : err;
}
| 169,008 |
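Note that the copy path in this row only gains an updated comment; the commit's actual check happens earlier, at userfaultfd registration time. A hedged kernel-flavored sketch of that registration-time condition, per the commit message (the predicate name is illustrative, not the exact kernel symbol):

```c
/* UFFDIO_COPY can fill holes in shmem/hugetlbfs files, so a VMA should only
 * be allowed to register if writes could legitimately happen through it.
 * MAP_PRIVATE read-only mappings keep VM_MAYWRITE and remain usable. */
static int vma_may_register_uffd(unsigned long vm_flags)
{
    return (vm_flags & VM_MAYWRITE) != 0;
}
```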
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: xsltAddTemplate(xsltStylesheetPtr style, xsltTemplatePtr cur,
const xmlChar *mode, const xmlChar *modeURI) {
xsltCompMatchPtr pat, list, next;
/*
* 'top' will point to style->xxxMatch ptr - declaring as 'void'
* avoids gcc 'type-punned pointer' warning.
*/
void **top = NULL;
const xmlChar *name = NULL;
float priority; /* the priority */
if ((style == NULL) || (cur == NULL) || (cur->match == NULL))
return(-1);
priority = cur->priority;
pat = xsltCompilePatternInternal(cur->match, style->doc, cur->elem,
style, NULL, 1);
if (pat == NULL)
return(-1);
while (pat) {
next = pat->next;
pat->next = NULL;
name = NULL;
pat->template = cur;
if (mode != NULL)
pat->mode = xmlDictLookup(style->dict, mode, -1);
if (modeURI != NULL)
pat->modeURI = xmlDictLookup(style->dict, modeURI, -1);
if (priority != XSLT_PAT_NO_PRIORITY)
pat->priority = priority;
/*
* insert it in the hash table list corresponding to its lookup name
*/
switch (pat->steps[0].op) {
case XSLT_OP_ATTR:
if (pat->steps[0].value != NULL)
name = pat->steps[0].value;
else
top = &(style->attrMatch);
break;
case XSLT_OP_PARENT:
case XSLT_OP_ANCESTOR:
top = &(style->elemMatch);
break;
case XSLT_OP_ROOT:
top = &(style->rootMatch);
break;
case XSLT_OP_KEY:
top = &(style->keyMatch);
break;
case XSLT_OP_ID:
/* TODO optimize ID !!! */
case XSLT_OP_NS:
case XSLT_OP_ALL:
top = &(style->elemMatch);
break;
case XSLT_OP_END:
case XSLT_OP_PREDICATE:
xsltTransformError(NULL, style, NULL,
"xsltAddTemplate: invalid compiled pattern\n");
xsltFreeCompMatch(pat);
return(-1);
/*
* TODO: some flags at the top level about type based patterns
* would be faster than inclusion in the hash table.
*/
case XSLT_OP_PI:
if (pat->steps[0].value != NULL)
name = pat->steps[0].value;
else
top = &(style->piMatch);
break;
case XSLT_OP_COMMENT:
top = &(style->commentMatch);
break;
case XSLT_OP_TEXT:
top = &(style->textMatch);
break;
case XSLT_OP_ELEM:
case XSLT_OP_NODE:
if (pat->steps[0].value != NULL)
name = pat->steps[0].value;
else
top = &(style->elemMatch);
break;
}
if (name != NULL) {
if (style->templatesHash == NULL) {
style->templatesHash = xmlHashCreate(1024);
if (style->templatesHash == NULL) {
xsltFreeCompMatch(pat);
return(-1);
}
xmlHashAddEntry3(style->templatesHash, name, mode, modeURI, pat);
} else {
list = (xsltCompMatchPtr) xmlHashLookup3(style->templatesHash,
name, mode, modeURI);
if (list == NULL) {
xmlHashAddEntry3(style->templatesHash, name,
mode, modeURI, pat);
} else {
/*
* Note '<=' since one must choose among the matching
* template rules that are left, the one that occurs
* last in the stylesheet
*/
if (list->priority <= pat->priority) {
pat->next = list;
xmlHashUpdateEntry3(style->templatesHash, name,
mode, modeURI, pat, NULL);
} else {
while (list->next != NULL) {
if (list->next->priority <= pat->priority)
break;
list = list->next;
}
pat->next = list->next;
list->next = pat;
}
}
}
} else if (top != NULL) {
list = *top;
if (list == NULL) {
*top = pat;
pat->next = NULL;
} else if (list->priority <= pat->priority) {
pat->next = list;
*top = pat;
} else {
while (list->next != NULL) {
if (list->next->priority <= pat->priority)
break;
list = list->next;
}
pat->next = list->next;
list->next = pat;
}
} else {
xsltTransformError(NULL, style, NULL,
"xsltAddTemplate: invalid compiled pattern\n");
xsltFreeCompMatch(pat);
return(-1);
}
#ifdef WITH_XSLT_DEBUG_PATTERN
if (mode)
xsltGenericDebug(xsltGenericDebugContext,
"added pattern : '%s' mode '%s' priority %f\n",
pat->pattern, pat->mode, pat->priority);
else
xsltGenericDebug(xsltGenericDebugContext,
"added pattern : '%s' priority %f\n",
pat->pattern, pat->priority);
#endif
pat = next;
}
return(0);
}
Commit Message: Roll libxslt to 891681e3e948f31732229f53cb6db7215f740fc7
BUG=583156,583171
Review URL: https://codereview.chromium.org/1853083002
Cr-Commit-Position: refs/heads/master@{#385338}
CWE ID: CWE-119 | xsltAddTemplate(xsltStylesheetPtr style, xsltTemplatePtr cur,
const xmlChar *mode, const xmlChar *modeURI) {
xsltCompMatchPtr pat, list, next;
/*
* 'top' will point to style->xxxMatch ptr - declaring as 'void'
* avoids gcc 'type-punned pointer' warning.
*/
void **top = NULL;
const xmlChar *name = NULL;
float priority; /* the priority */
if ((style == NULL) || (cur == NULL))
return(-1);
/* Register named template */
if (cur->name != NULL) {
if (style->namedTemplates == NULL) {
style->namedTemplates = xmlHashCreate(10);
if (style->namedTemplates == NULL)
return(-1);
}
else {
void *dup = xmlHashLookup2(style->namedTemplates, cur->name,
cur->nameURI);
if (dup != NULL) {
xsltTransformError(NULL, style, NULL,
"xsl:template: error duplicate name '%s'\n",
cur->name);
style->errors++;
return(-1);
}
}
xmlHashAddEntry2(style->namedTemplates, cur->name, cur->nameURI, cur);
}
if (cur->match == NULL)
return(0);
priority = cur->priority;
pat = xsltCompilePatternInternal(cur->match, style->doc, cur->elem,
style, NULL, 1);
if (pat == NULL)
return(-1);
while (pat) {
next = pat->next;
pat->next = NULL;
name = NULL;
pat->template = cur;
if (mode != NULL)
pat->mode = xmlDictLookup(style->dict, mode, -1);
if (modeURI != NULL)
pat->modeURI = xmlDictLookup(style->dict, modeURI, -1);
if (priority != XSLT_PAT_NO_PRIORITY)
pat->priority = priority;
/*
* insert it in the hash table list corresponding to its lookup name
*/
switch (pat->steps[0].op) {
case XSLT_OP_ATTR:
if (pat->steps[0].value != NULL)
name = pat->steps[0].value;
else
top = &(style->attrMatch);
break;
case XSLT_OP_PARENT:
case XSLT_OP_ANCESTOR:
top = &(style->elemMatch);
break;
case XSLT_OP_ROOT:
top = &(style->rootMatch);
break;
case XSLT_OP_KEY:
top = &(style->keyMatch);
break;
case XSLT_OP_ID:
/* TODO optimize ID !!! */
case XSLT_OP_NS:
case XSLT_OP_ALL:
top = &(style->elemMatch);
break;
case XSLT_OP_END:
case XSLT_OP_PREDICATE:
xsltTransformError(NULL, style, NULL,
"xsltAddTemplate: invalid compiled pattern\n");
xsltFreeCompMatch(pat);
return(-1);
/*
* TODO: some flags at the top level about type based patterns
* would be faster than inclusion in the hash table.
*/
case XSLT_OP_PI:
if (pat->steps[0].value != NULL)
name = pat->steps[0].value;
else
top = &(style->piMatch);
break;
case XSLT_OP_COMMENT:
top = &(style->commentMatch);
break;
case XSLT_OP_TEXT:
top = &(style->textMatch);
break;
case XSLT_OP_ELEM:
case XSLT_OP_NODE:
if (pat->steps[0].value != NULL)
name = pat->steps[0].value;
else
top = &(style->elemMatch);
break;
}
if (name != NULL) {
if (style->templatesHash == NULL) {
style->templatesHash = xmlHashCreate(1024);
if (style->templatesHash == NULL) {
xsltFreeCompMatch(pat);
return(-1);
}
xmlHashAddEntry3(style->templatesHash, name, mode, modeURI, pat);
} else {
list = (xsltCompMatchPtr) xmlHashLookup3(style->templatesHash,
name, mode, modeURI);
if (list == NULL) {
xmlHashAddEntry3(style->templatesHash, name,
mode, modeURI, pat);
} else {
/*
* Note '<=' since one must choose among the matching
* template rules that are left, the one that occurs
* last in the stylesheet
*/
if (list->priority <= pat->priority) {
pat->next = list;
xmlHashUpdateEntry3(style->templatesHash, name,
mode, modeURI, pat, NULL);
} else {
while (list->next != NULL) {
if (list->next->priority <= pat->priority)
break;
list = list->next;
}
pat->next = list->next;
list->next = pat;
}
}
}
} else if (top != NULL) {
list = *top;
if (list == NULL) {
*top = pat;
pat->next = NULL;
} else if (list->priority <= pat->priority) {
pat->next = list;
*top = pat;
} else {
while (list->next != NULL) {
if (list->next->priority <= pat->priority)
break;
list = list->next;
}
pat->next = list->next;
list->next = pat;
}
} else {
xsltTransformError(NULL, style, NULL,
"xsltAddTemplate: invalid compiled pattern\n");
xsltFreeCompMatch(pat);
return(-1);
}
#ifdef WITH_XSLT_DEBUG_PATTERN
if (mode)
xsltGenericDebug(xsltGenericDebugContext,
"added pattern : '%s' mode '%s' priority %f\n",
pat->pattern, pat->mode, pat->priority);
else
xsltGenericDebug(xsltGenericDebugContext,
"added pattern : '%s' priority %f\n",
pat->pattern, pat->priority);
#endif
pat = next;
}
return(0);
}
| 173,310 |
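The substantive addition in the fixed xsltAddTemplate is the named-template registration block: a second xsl:template with the same (name, nameURI) pair is now rejected before insertion instead of silently landing in the hash. A sketch of that lookup-before-insert pattern with a standard map standing in for libxslt's xmlHash (the error reporting is illustrative):

#include <iostream>
#include <map>
#include <string>
#include <utility>

// Key mirrors xmlHashLookup2(name, nameURI): lookups are on the pair.
using TemplateKey = std::pair<std::string, std::string>;

std::map<TemplateKey, int> named_templates;  // int stands in for the template

// Returns false on a duplicate, as the fixed xsltAddTemplate now does.
bool register_named_template(const std::string& name, const std::string& uri,
                             int tmpl) {
  auto result = named_templates.emplace(TemplateKey{name, uri}, tmpl);
  if (!result.second) {  // emplace found an existing entry, inserted nothing
    std::cerr << "xsl:template: error duplicate name '" << name << "'\n";
    return false;
  }
  return true;
}

int main() {
  register_named_template("header", "", 1);
  return register_named_template("header", "", 2) ? 1 : 0;  // duplicate: rejected
}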
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void inbound_data_waiting(void *context) {
eager_reader_t *reader = (eager_reader_t *)context;
data_buffer_t *buffer = (data_buffer_t *)reader->allocator->alloc(reader->buffer_size + sizeof(data_buffer_t));
if (!buffer) {
LOG_ERROR("%s couldn't aquire memory for inbound data buffer.", __func__);
return;
}
buffer->length = 0;
buffer->offset = 0;
int bytes_read = read(reader->inbound_fd, buffer->data, reader->buffer_size);
if (bytes_read > 0) {
buffer->length = bytes_read;
fixed_queue_enqueue(reader->buffers, buffer);
eventfd_write(reader->bytes_available_fd, bytes_read);
} else {
if (bytes_read == 0)
LOG_WARN("%s fd said bytes existed, but none were found.", __func__);
else
LOG_WARN("%s unable to read from file descriptor: %s", __func__, strerror(errno));
reader->allocator->free(buffer);
}
}
Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
CWE ID: CWE-284 | static void inbound_data_waiting(void *context) {
eager_reader_t *reader = (eager_reader_t *)context;
data_buffer_t *buffer = (data_buffer_t *)reader->allocator->alloc(reader->buffer_size + sizeof(data_buffer_t));
if (!buffer) {
LOG_ERROR("%s couldn't aquire memory for inbound data buffer.", __func__);
return;
}
buffer->length = 0;
buffer->offset = 0;
int bytes_read = TEMP_FAILURE_RETRY(read(reader->inbound_fd, buffer->data, reader->buffer_size));
if (bytes_read > 0) {
buffer->length = bytes_read;
fixed_queue_enqueue(reader->buffers, buffer);
eventfd_write(reader->bytes_available_fd, bytes_read);
} else {
if (bytes_read == 0)
LOG_WARN("%s fd said bytes existed, but none were found.", __func__);
else
LOG_WARN("%s unable to read from file descriptor: %s", __func__, strerror(errno));
reader->allocator->free(buffer);
}
}
| 173,481 |
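The entire fix is wrapping read() in TEMP_FAILURE_RETRY so a signal delivered to the Bluetooth process cannot surface as a spurious EINTR failure and drop the inbound buffer. The glibc macro expands, in spirit, to the retry loop below (plain POSIX, no glibc macro assumed):

#include <cerrno>
#include <unistd.h>

// Retry a read() that a signal interrupted before any data arrived; this is
// what TEMP_FAILURE_RETRY(read(...)) accomplishes.
ssize_t read_retry(int fd, void* buf, size_t count) {
  ssize_t n;
  do {
    n = read(fd, buf, count);
  } while (n == -1 && errno == EINTR);  // only EINTR is retried
  return n;  // real errors and EOF still propagate to the caller
}

int main() {
  char byte;
  return read_retry(0, &byte, 1) >= 0 ? 0 : 1;  // one byte from stdin
}

Only EINTR is retried; genuine failures and EOF still reach the caller, which is why the error-handling branch in inbound_data_waiting is unchanged by the fix.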
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void SafeBrowsingBlockingPage::CommandReceived(const std::string& cmd) {
std::string command(cmd); // Make a local copy so we can modify it.
if (command.length() > 1 && command[0] == '"') {
command = command.substr(1, command.length() - 2);
}
RecordUserReactionTime(command);
if (command == kDoReportCommand) {
SetReportingPreference(true);
return;
}
if (command == kDontReportCommand) {
SetReportingPreference(false);
return;
}
if (command == kLearnMoreCommand) {
GURL url;
SBThreatType threat_type = unsafe_resources_[0].threat_type;
if (threat_type == SB_THREAT_TYPE_URL_MALWARE) {
url = google_util::AppendGoogleLocaleParam(GURL(kLearnMoreMalwareUrl));
} else if (threat_type == SB_THREAT_TYPE_URL_PHISHING ||
threat_type == SB_THREAT_TYPE_CLIENT_SIDE_PHISHING_URL) {
url = google_util::AppendGoogleLocaleParam(GURL(kLearnMorePhishingUrl));
} else {
NOTREACHED();
}
OpenURLParams params(
url, Referrer(), CURRENT_TAB, content::PAGE_TRANSITION_LINK, false);
web_contents_->OpenURL(params);
return;
}
if (command == kLearnMoreCommandV2) {
GURL url;
SBThreatType threat_type = unsafe_resources_[0].threat_type;
if (threat_type == SB_THREAT_TYPE_URL_MALWARE) {
url = google_util::AppendGoogleLocaleParam(GURL(kLearnMoreMalwareUrlV2));
} else if (threat_type == SB_THREAT_TYPE_URL_PHISHING ||
threat_type == SB_THREAT_TYPE_CLIENT_SIDE_PHISHING_URL) {
url = google_util::AppendGoogleLocaleParam(GURL(kLearnMorePhishingUrlV2));
} else {
NOTREACHED();
}
OpenURLParams params(
url, Referrer(), CURRENT_TAB, content::PAGE_TRANSITION_LINK, false);
web_contents_->OpenURL(params);
return;
}
if (command == kShowPrivacyCommand) {
GURL url(l10n_util::GetStringUTF8(IDS_SAFE_BROWSING_PRIVACY_POLICY_URL));
OpenURLParams params(
url, Referrer(), CURRENT_TAB, content::PAGE_TRANSITION_LINK, false);
web_contents_->OpenURL(params);
return;
}
bool proceed_blocked = false;
if (command == kProceedCommand) {
if (IsPrefEnabled(prefs::kSafeBrowsingProceedAnywayDisabled)) {
proceed_blocked = true;
} else {
interstitial_page_->Proceed();
return;
}
}
if (command == kTakeMeBackCommand || proceed_blocked) {
if (is_main_frame_load_blocked_) {
interstitial_page_->DontProceed();
return;
}
if (web_contents_->GetController().CanGoBack()) {
web_contents_->GetController().GoBack();
} else {
web_contents_->GetController().LoadURL(
GURL(chrome::kChromeUINewTabURL),
content::Referrer(),
content::PAGE_TRANSITION_AUTO_TOPLEVEL,
std::string());
}
return;
}
int element_index = 0;
size_t colon_index = command.find(':');
if (colon_index != std::string::npos) {
DCHECK(colon_index < command.size() - 1);
bool result = base::StringToInt(base::StringPiece(command.begin() +
colon_index + 1,
command.end()),
&element_index);
command = command.substr(0, colon_index);
DCHECK(result);
}
if (element_index >= static_cast<int>(unsafe_resources_.size())) {
NOTREACHED();
return;
}
std::string bad_url_spec = unsafe_resources_[element_index].url.spec();
if (command == kReportErrorCommand) {
SBThreatType threat_type = unsafe_resources_[element_index].threat_type;
DCHECK(threat_type == SB_THREAT_TYPE_URL_PHISHING ||
threat_type == SB_THREAT_TYPE_CLIENT_SIDE_PHISHING_URL);
GURL report_url =
safe_browsing_util::GeneratePhishingReportUrl(
kSbReportPhishingErrorUrl,
bad_url_spec,
threat_type == SB_THREAT_TYPE_CLIENT_SIDE_PHISHING_URL);
OpenURLParams params(
report_url, Referrer(), CURRENT_TAB, content::PAGE_TRANSITION_LINK,
false);
web_contents_->OpenURL(params);
return;
}
if (command == kShowDiagnosticCommand) {
std::string diagnostic =
base::StringPrintf(kSbDiagnosticUrl,
net::EscapeQueryParamValue(bad_url_spec, true).c_str());
GURL diagnostic_url(diagnostic);
diagnostic_url = google_util::AppendGoogleLocaleParam(diagnostic_url);
DCHECK(unsafe_resources_[element_index].threat_type ==
SB_THREAT_TYPE_URL_MALWARE);
OpenURLParams params(
diagnostic_url, Referrer(), CURRENT_TAB, content::PAGE_TRANSITION_LINK,
false);
web_contents_->OpenURL(params);
return;
}
if (command == kExpandedSeeMore) {
return;
}
NOTREACHED() << "Unexpected command: " << command;
}
Commit Message: Check for a negative integer properly.
BUG=169966
Review URL: https://chromiumcodereview.appspot.com/11892002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@176879 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | void SafeBrowsingBlockingPage::CommandReceived(const std::string& cmd) {
std::string command(cmd); // Make a local copy so we can modify it.
if (command.length() > 1 && command[0] == '"') {
command = command.substr(1, command.length() - 2);
}
RecordUserReactionTime(command);
if (command == kDoReportCommand) {
SetReportingPreference(true);
return;
}
if (command == kDontReportCommand) {
SetReportingPreference(false);
return;
}
if (command == kLearnMoreCommand) {
GURL url;
SBThreatType threat_type = unsafe_resources_[0].threat_type;
if (threat_type == SB_THREAT_TYPE_URL_MALWARE) {
url = google_util::AppendGoogleLocaleParam(GURL(kLearnMoreMalwareUrl));
} else if (threat_type == SB_THREAT_TYPE_URL_PHISHING ||
threat_type == SB_THREAT_TYPE_CLIENT_SIDE_PHISHING_URL) {
url = google_util::AppendGoogleLocaleParam(GURL(kLearnMorePhishingUrl));
} else {
NOTREACHED();
}
OpenURLParams params(
url, Referrer(), CURRENT_TAB, content::PAGE_TRANSITION_LINK, false);
web_contents_->OpenURL(params);
return;
}
if (command == kLearnMoreCommandV2) {
GURL url;
SBThreatType threat_type = unsafe_resources_[0].threat_type;
if (threat_type == SB_THREAT_TYPE_URL_MALWARE) {
url = google_util::AppendGoogleLocaleParam(GURL(kLearnMoreMalwareUrlV2));
} else if (threat_type == SB_THREAT_TYPE_URL_PHISHING ||
threat_type == SB_THREAT_TYPE_CLIENT_SIDE_PHISHING_URL) {
url = google_util::AppendGoogleLocaleParam(GURL(kLearnMorePhishingUrlV2));
} else {
NOTREACHED();
}
OpenURLParams params(
url, Referrer(), CURRENT_TAB, content::PAGE_TRANSITION_LINK, false);
web_contents_->OpenURL(params);
return;
}
if (command == kShowPrivacyCommand) {
GURL url(l10n_util::GetStringUTF8(IDS_SAFE_BROWSING_PRIVACY_POLICY_URL));
OpenURLParams params(
url, Referrer(), CURRENT_TAB, content::PAGE_TRANSITION_LINK, false);
web_contents_->OpenURL(params);
return;
}
bool proceed_blocked = false;
if (command == kProceedCommand) {
if (IsPrefEnabled(prefs::kSafeBrowsingProceedAnywayDisabled)) {
proceed_blocked = true;
} else {
interstitial_page_->Proceed();
return;
}
}
if (command == kTakeMeBackCommand || proceed_blocked) {
if (is_main_frame_load_blocked_) {
interstitial_page_->DontProceed();
return;
}
if (web_contents_->GetController().CanGoBack()) {
web_contents_->GetController().GoBack();
} else {
web_contents_->GetController().LoadURL(
GURL(chrome::kChromeUINewTabURL),
content::Referrer(),
content::PAGE_TRANSITION_AUTO_TOPLEVEL,
std::string());
}
return;
}
size_t element_index = 0;
size_t colon_index = command.find(':');
if (colon_index != std::string::npos) {
DCHECK(colon_index < command.size() - 1);
int result_int = 0;
bool result = base::StringToInt(base::StringPiece(command.begin() +
colon_index + 1,
command.end()),
&result_int);
command = command.substr(0, colon_index);
if (result)
element_index = static_cast<size_t>(result_int);
}
if (element_index >= unsafe_resources_.size()) {
NOTREACHED();
return;
}
std::string bad_url_spec = unsafe_resources_[element_index].url.spec();
if (command == kReportErrorCommand) {
SBThreatType threat_type = unsafe_resources_[element_index].threat_type;
DCHECK(threat_type == SB_THREAT_TYPE_URL_PHISHING ||
threat_type == SB_THREAT_TYPE_CLIENT_SIDE_PHISHING_URL);
GURL report_url =
safe_browsing_util::GeneratePhishingReportUrl(
kSbReportPhishingErrorUrl,
bad_url_spec,
threat_type == SB_THREAT_TYPE_CLIENT_SIDE_PHISHING_URL);
OpenURLParams params(
report_url, Referrer(), CURRENT_TAB, content::PAGE_TRANSITION_LINK,
false);
web_contents_->OpenURL(params);
return;
}
if (command == kShowDiagnosticCommand) {
std::string diagnostic =
base::StringPrintf(kSbDiagnosticUrl,
net::EscapeQueryParamValue(bad_url_spec, true).c_str());
GURL diagnostic_url(diagnostic);
diagnostic_url = google_util::AppendGoogleLocaleParam(diagnostic_url);
DCHECK(unsafe_resources_[element_index].threat_type ==
SB_THREAT_TYPE_URL_MALWARE);
OpenURLParams params(
diagnostic_url, Referrer(), CURRENT_TAB, content::PAGE_TRANSITION_LINK,
false);
web_contents_->OpenURL(params);
return;
}
if (command == kExpandedSeeMore) {
return;
}
NOTREACHED() << "Unexpected command: " << command;
}
| 171,396 |
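The vulnerable version parsed the renderer-influenced index into a signed int and compared it against the container size through a cast, so a negative value could pass the bounds check and index backwards. The fix validates the signed parse before widening to size_t; below is a slightly stricter sketch of that ordering (the real code falls back to index 0 on a failed parse rather than bailing out):

#include <cstddef>
#include <string>
#include <vector>

// Parse "command:index" safely: reject non-numeric and negative values
// before the number is ever used as a container index.
bool lookup(const std::vector<std::string>& resources,
            const std::string& command, std::string* out) {
  std::size_t element_index = 0;           // default, as in the fixed code
  std::size_t colon = command.find(':');
  if (colon != std::string::npos) {
    int parsed = 0;
    try {
      parsed = std::stoi(command.substr(colon + 1));
    } catch (...) {
      return false;                        // non-numeric suffix
    }
    if (parsed < 0)
      return false;                        // the case the fix guards against
    element_index = static_cast<std::size_t>(parsed);
  }
  if (element_index >= resources.size())   // unsigned compare is now sound
    return false;
  *out = resources[element_index];
  return true;
}

int main() {
  std::vector<std::string> r = {"a", "b"};
  std::string out;
  return lookup(r, "show:-1", &out) ? 1 : 0;  // rejected, no OOB access
}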
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void AppCacheUpdateJob::OnDestructionImminent(AppCacheHost* host) {
PendingMasters::iterator found =
pending_master_entries_.find(host->pending_master_entry_url());
DCHECK(found != pending_master_entries_.end());
PendingHosts& hosts = found->second;
PendingHosts::iterator it = std::find(hosts.begin(), hosts.end(), host);
DCHECK(it != hosts.end());
hosts.erase(it);
}
Commit Message: Fix possible map::end() dereference in AppCacheUpdateJob triggered by a compromised renderer.
BUG=551044
Review URL: https://codereview.chromium.org/1418783005
Cr-Commit-Position: refs/heads/master@{#358815}
CWE ID: | void AppCacheUpdateJob::OnDestructionImminent(AppCacheHost* host) {
PendingMasters::iterator found =
pending_master_entries_.find(host->pending_master_entry_url());
CHECK(found != pending_master_entries_.end());
PendingHosts& hosts = found->second;
PendingHosts::iterator it = std::find(hosts.begin(), hosts.end(), host);
CHECK(it != hosts.end());
hosts.erase(it);
}
| 171,743 |
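DCHECK compiles away in release builds, so the vulnerable OnDestructionImminent would dereference map::end() whenever a compromised renderer supplied an unknown URL; the fix promotes both assertions to CHECK so the guard ships. The hazard and its guard in miniature, with std::abort() standing in for CHECK:

#include <algorithm>
#include <cstdlib>
#include <map>
#include <string>
#include <vector>

// Erasing a host from a lookup table: dereferencing end() is undefined
// behavior, so the guards must survive into release builds.
void remove_host(std::map<std::string, std::vector<int>>& pending,
                 const std::string& url, int host) {
  auto found = pending.find(url);
  if (found == pending.end())
    std::abort();  // CHECK-equivalent: fail loudly, never walk off the map
  auto& hosts = found->second;
  auto it = std::find(hosts.begin(), hosts.end(), host);
  if (it == hosts.end())
    std::abort();
  hosts.erase(it);
}

int main() {
  std::map<std::string, std::vector<int>> pending{{"u", {1, 2}}};
  remove_host(pending, "u", 1);   // fine
  // remove_host(pending, "x", 1) would abort instead of corrupting memory
}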
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int ext4_writepage(struct page *page,
struct writeback_control *wbc)
{
int ret = 0;
loff_t size;
unsigned int len;
struct buffer_head *page_bufs;
struct inode *inode = page->mapping->host;
trace_ext4_writepage(inode, page);
size = i_size_read(inode);
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
else
len = PAGE_CACHE_SIZE;
if (page_has_buffers(page)) {
page_bufs = page_buffers(page);
if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
ext4_bh_delay_or_unwritten)) {
/*
* We don't want to do block allocation
* So redirty the page and return
* We may reach here when we do a journal commit
* via journal_submit_inode_data_buffers.
* If we don't have mapping block we just ignore
* them. We can also reach here via shrink_page_list
*/
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
} else {
/*
* The test for page_has_buffers() is subtle:
* We know the page is dirty but it lost buffers. That means
* that at some moment in time after write_begin()/write_end()
* has been called all buffers have been clean and thus they
* must have been written at least once. So they are all
* mapped and we can happily proceed with mapping them
* and writing the page.
*
* Try to initialize the buffer_heads and check whether
* all are mapped and non delay. We don't want to
* do block allocation here.
*/
ret = block_prepare_write(page, 0, len,
noalloc_get_block_write);
if (!ret) {
page_bufs = page_buffers(page);
/* check whether all are mapped and non delay */
if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
ext4_bh_delay_or_unwritten)) {
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
} else {
/*
* We can't do block allocation here
* so just redity the page and unlock
* and return
*/
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
/* now mark the buffer_heads as dirty and uptodate */
block_commit_write(page, 0, len);
}
if (PageChecked(page) && ext4_should_journal_data(inode)) {
/*
* It's mmapped pagecache. Add buffers and journal it. There
* doesn't seem much point in redirtying the page here.
*/
ClearPageChecked(page);
return __ext4_journalled_writepage(page, len);
}
if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
ret = nobh_writepage(page, noalloc_get_block_write, wbc);
else
ret = block_write_full_page(page, noalloc_get_block_write,
wbc);
return ret;
}
Commit Message: ext4: use ext4_get_block_write in buffer write
Allocate uninitialized extent before ext4 buffer write and
convert the extent to initialized after io completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
CWE ID: | static int ext4_writepage(struct page *page,
struct writeback_control *wbc)
{
int ret = 0;
loff_t size;
unsigned int len;
struct buffer_head *page_bufs = NULL;
struct inode *inode = page->mapping->host;
trace_ext4_writepage(inode, page);
size = i_size_read(inode);
if (page->index == size >> PAGE_CACHE_SHIFT)
len = size & ~PAGE_CACHE_MASK;
else
len = PAGE_CACHE_SIZE;
if (page_has_buffers(page)) {
page_bufs = page_buffers(page);
if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
ext4_bh_delay_or_unwritten)) {
/*
* We don't want to do block allocation
* So redirty the page and return
* We may reach here when we do a journal commit
* via journal_submit_inode_data_buffers.
* If we don't have mapping block we just ignore
* them. We can also reach here via shrink_page_list
*/
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
} else {
/*
* The test for page_has_buffers() is subtle:
* We know the page is dirty but it lost buffers. That means
* that at some moment in time after write_begin()/write_end()
* has been called all buffers have been clean and thus they
* must have been written at least once. So they are all
* mapped and we can happily proceed with mapping them
* and writing the page.
*
* Try to initialize the buffer_heads and check whether
* all are mapped and non delay. We don't want to
* do block allocation here.
*/
ret = block_prepare_write(page, 0, len,
noalloc_get_block_write);
if (!ret) {
page_bufs = page_buffers(page);
/* check whether all are mapped and non delay */
if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
ext4_bh_delay_or_unwritten)) {
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
} else {
/*
* We can't do block allocation here
* so just redity the page and unlock
* and return
*/
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
}
/* now mark the buffer_heads as dirty and uptodate */
block_commit_write(page, 0, len);
}
if (PageChecked(page) && ext4_should_journal_data(inode)) {
/*
* It's mmapped pagecache. Add buffers and journal it. There
* doesn't seem much point in redirtying the page here.
*/
ClearPageChecked(page);
return __ext4_journalled_writepage(page, len);
}
if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
ret = nobh_writepage(page, noalloc_get_block_write, wbc);
else if (page_bufs && buffer_uninit(page_bufs)) {
ext4_set_bh_endio(page_bufs, inode);
ret = block_write_full_page_endio(page, noalloc_get_block_write,
wbc, ext4_end_io_buffer_write);
} else
ret = block_write_full_page(page, noalloc_get_block_write,
wbc);
return ret;
}
| 167,549 |
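The new branch allocates blocks as uninitialized extents and only converts them to initialized from the I/O completion callback (ext4_end_io_buffer_write), so a concurrent direct-I/O reader can never observe stale on-disk contents through a freshly allocated extent. A single-threaded toy of that publish-after-completion pattern — names are illustrative, not ext4's:

#include <cassert>
#include <functional>
#include <string>
#include <utility>

// An extent's contents are exposed only once the backing write completed.
struct Extent {
  bool uninitialized = true;  // allocated, but must read back as zeros
  std::string data;
};

std::string read_extent(const Extent& e) {
  return e.uninitialized ? std::string(e.data.size(), '\0')  // zeros, not stale
                         : e.data;
}

// Start a write; the returned callback plays ext4_end_io_buffer_write.
std::function<void()> start_write(Extent& e, std::string payload) {
  e.data = std::move(payload);               // data lands before conversion
  return [&e] { e.uninitialized = false; };  // completion flips the state
}

int main() {
  Extent e;
  auto complete = start_write(e, "new");
  assert(read_extent(e) == std::string(3, '\0'));  // mid-flight: zeros only
  complete();
  assert(read_extent(e) == "new");                 // initialized after end_io
}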
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int ChromeBrowserMainPartsChromeos::PreEarlyInitialization() {
base::CommandLine* singleton_command_line =
base::CommandLine::ForCurrentProcess();
if (parsed_command_line().HasSwitch(switches::kGuestSession)) {
singleton_command_line->AppendSwitch(::switches::kDisableSync);
singleton_command_line->AppendSwitch(::switches::kDisableExtensions);
browser_defaults::bookmarks_enabled = false;
}
if (!base::SysInfo::IsRunningOnChromeOS() &&
!parsed_command_line().HasSwitch(switches::kLoginManager) &&
!parsed_command_line().HasSwitch(switches::kLoginUser) &&
!parsed_command_line().HasSwitch(switches::kGuestSession)) {
singleton_command_line->AppendSwitchASCII(
switches::kLoginUser,
cryptohome::Identification(user_manager::StubAccountId()).id());
if (!parsed_command_line().HasSwitch(switches::kLoginProfile)) {
singleton_command_line->AppendSwitchASCII(switches::kLoginProfile,
chrome::kTestUserProfileDir);
}
LOG(WARNING) << "Running as stub user with profile dir: "
<< singleton_command_line
->GetSwitchValuePath(switches::kLoginProfile)
.value();
}
RegisterStubPathOverridesIfNecessary();
#if defined(GOOGLE_CHROME_BUILD)
const char kChromeOSReleaseTrack[] = "CHROMEOS_RELEASE_TRACK";
std::string channel;
if (base::SysInfo::GetLsbReleaseValue(kChromeOSReleaseTrack, &channel))
chrome::SetChannel(channel);
#endif
dbus_pre_early_init_ = std::make_unique<internal::DBusPreEarlyInit>();
return ChromeBrowserMainPartsLinux::PreEarlyInitialization();
}
Commit Message: Add a fake DriveFS launcher client.
Using DriveFS requires building and deploying ChromeOS. Add a client for
the fake DriveFS launcher to allow the use of a real DriveFS from a
ChromeOS chroot to be used with a target_os="chromeos" build of chrome.
This connects to the fake DriveFS launcher using mojo over a unix domain
socket named by a command-line flag, using the launcher to create
DriveFS instances.
Bug: 848126
Change-Id: I22dcca154d41bda196dd7c1782bb503f6bcba5b1
Reviewed-on: https://chromium-review.googlesource.com/1098434
Reviewed-by: Xiyuan Xia <[email protected]>
Commit-Queue: Sam McNally <[email protected]>
Cr-Commit-Position: refs/heads/master@{#567513}
CWE ID: | int ChromeBrowserMainPartsChromeos::PreEarlyInitialization() {
base::CommandLine* singleton_command_line =
base::CommandLine::ForCurrentProcess();
if (parsed_command_line().HasSwitch(switches::kGuestSession)) {
singleton_command_line->AppendSwitch(::switches::kDisableSync);
singleton_command_line->AppendSwitch(::switches::kDisableExtensions);
browser_defaults::bookmarks_enabled = false;
}
if (!base::SysInfo::IsRunningOnChromeOS() &&
!parsed_command_line().HasSwitch(switches::kLoginManager) &&
!parsed_command_line().HasSwitch(switches::kLoginUser) &&
!parsed_command_line().HasSwitch(switches::kGuestSession)) {
singleton_command_line->AppendSwitchASCII(
switches::kLoginUser,
cryptohome::Identification(user_manager::StubAccountId()).id());
if (!parsed_command_line().HasSwitch(switches::kLoginProfile)) {
singleton_command_line->AppendSwitchASCII(switches::kLoginProfile,
chrome::kTestUserProfileDir);
}
LOG(WARNING) << "Running as stub user with profile dir: "
<< singleton_command_line
->GetSwitchValuePath(switches::kLoginProfile)
.value();
}
RegisterStubPathOverridesIfNecessary();
#if defined(GOOGLE_CHROME_BUILD)
const char kChromeOSReleaseTrack[] = "CHROMEOS_RELEASE_TRACK";
std::string channel;
if (base::SysInfo::GetLsbReleaseValue(kChromeOSReleaseTrack, &channel))
chrome::SetChannel(channel);
#endif
dbus_pre_early_init_ = std::make_unique<internal::DBusPreEarlyInit>();
if (!base::SysInfo::IsRunningOnChromeOS() &&
parsed_command_line().HasSwitch(
switches::kFakeDriveFsLauncherChrootPath) &&
parsed_command_line().HasSwitch(
switches::kFakeDriveFsLauncherSocketPath)) {
drivefs::FakeDriveFsLauncherClient::Init(
parsed_command_line().GetSwitchValuePath(
switches::kFakeDriveFsLauncherChrootPath),
parsed_command_line().GetSwitchValuePath(
switches::kFakeDriveFsLauncherSocketPath));
}
return ChromeBrowserMainPartsLinux::PreEarlyInitialization();
}
| 171,728 |
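The added block wires up the fake DriveFS launcher client only when both command-line paths are supplied together — the usual shape for paired developer-only switches. A sketch of that gate with a hand-rolled parser and hypothetical flag names:

#include <cstring>
#include <optional>
#include <string>

// Return the value of a "--name=value" switch, if present.
std::optional<std::string> get_switch(int argc, char** argv, const char* name) {
  std::string prefix = std::string("--") + name + "=";
  for (int i = 1; i < argc; ++i)
    if (std::strncmp(argv[i], prefix.c_str(), prefix.size()) == 0)
      return std::string(argv[i] + prefix.size());
  return std::nullopt;
}

int main(int argc, char** argv) {
  auto chroot = get_switch(argc, argv, "fake-launcher-chroot-path");
  auto socket = get_switch(argc, argv, "fake-launcher-socket-path");
  if (chroot && socket) {
    // Both halves present: only now initialize the test-only client.
  }
  return 0;
}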
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static MagickBooleanType WritePDBImage(const ImageInfo *image_info,Image *image)
{
const char
*comment;
int
bits;
MagickBooleanType
status;
PDBImage
pdb_image;
PDBInfo
pdb_info;
QuantumInfo
*quantum_info;
register const PixelPacket
*p;
register ssize_t
x;
register unsigned char
*q;
size_t
bits_per_pixel,
literal,
packets,
packet_size,
repeat;
ssize_t
y;
unsigned char
*buffer,
*runlength,
*scanline;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace);
if ((image -> colors <= 2 ) ||
(GetImageType(image,&image->exception ) == BilevelType)) {
bits_per_pixel=1;
} else if (image->colors <= 4) {
bits_per_pixel=2;
} else if (image->colors <= 8) {
bits_per_pixel=3;
} else {
bits_per_pixel=4;
}
(void) ResetMagickMemory(&pdb_info,0,sizeof(pdb_info));
(void) CopyMagickString(pdb_info.name,image_info->filename,
sizeof(pdb_info.name));
pdb_info.attributes=0;
pdb_info.version=0;
pdb_info.create_time=time(NULL);
pdb_info.modify_time=pdb_info.create_time;
pdb_info.archive_time=0;
pdb_info.modify_number=0;
pdb_info.application_info=0;
pdb_info.sort_info=0;
(void) CopyMagickMemory(pdb_info.type,"vIMG",4);
(void) CopyMagickMemory(pdb_info.id,"View",4);
pdb_info.seed=0;
pdb_info.next_record=0;
comment=GetImageProperty(image,"comment");
pdb_info.number_records=(comment == (const char *) NULL ? 1 : 2);
(void) WriteBlob(image,sizeof(pdb_info.name),(unsigned char *) pdb_info.name);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_info.attributes);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_info.version);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.create_time);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.modify_time);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.archive_time);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.modify_number);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.application_info);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.sort_info);
(void) WriteBlob(image,4,(unsigned char *) pdb_info.type);
(void) WriteBlob(image,4,(unsigned char *) pdb_info.id);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.seed);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.next_record);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_info.number_records);
(void) CopyMagickString(pdb_image.name,pdb_info.name,sizeof(pdb_image.name));
pdb_image.version=1; /* RLE Compressed */
switch (bits_per_pixel)
{
case 1: pdb_image.type=(unsigned char) 0xff; break; /* monochrome */
case 2: pdb_image.type=(unsigned char) 0x00; break; /* 2 bit gray */
default: pdb_image.type=(unsigned char) 0x02; /* 4 bit gray */
}
pdb_image.reserved_1=0;
pdb_image.note=0;
pdb_image.x_last=0;
pdb_image.y_last=0;
pdb_image.reserved_2=0;
pdb_image.x_anchor=(unsigned short) 0xffff;
pdb_image.y_anchor=(unsigned short) 0xffff;
pdb_image.width=(short) image->columns;
if (image->columns % 16)
pdb_image.width=(short) (16*(image->columns/16+1));
pdb_image.height=(short) image->rows;
packets=((bits_per_pixel*image->columns+7)/8);
runlength=(unsigned char *) AcquireQuantumMemory(9UL*packets,
image->rows*sizeof(*runlength));
if (runlength == (unsigned char *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
buffer=(unsigned char *) AcquireQuantumMemory(256,sizeof(*buffer));
if (buffer == (unsigned char *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
packet_size=(size_t) (image->depth > 8 ? 2: 1);
scanline=(unsigned char *) AcquireQuantumMemory(image->columns,packet_size*
sizeof(*scanline));
if (scanline == (unsigned char *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace);
/*
Convert to GRAY raster scanline.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
bits=8/(int) bits_per_pixel-1; /* start at most significant bits */
literal=0;
repeat=0;
q=runlength;
buffer[0]=0x00;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
(void) ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info,
GrayQuantum,scanline,&image->exception);
for (x=0; x < (ssize_t) pdb_image.width; x++)
{
if (x < (ssize_t) image->columns)
buffer[literal+repeat]|=(0xff-scanline[x*packet_size]) >>
(8-bits_per_pixel) << bits*bits_per_pixel;
bits--;
if (bits < 0)
{
if (((literal+repeat) > 0) &&
(buffer[literal+repeat] == buffer[literal+repeat-1]))
{
if (repeat == 0)
{
literal--;
repeat++;
}
repeat++;
if (0x7f < repeat)
{
q=EncodeRLE(q,buffer,literal,repeat);
literal=0;
repeat=0;
}
}
else
{
if (repeat >= 2)
literal+=repeat;
else
{
q=EncodeRLE(q,buffer,literal,repeat);
buffer[0]=buffer[literal+repeat];
literal=0;
}
literal++;
repeat=0;
if (0x7f < literal)
{
q=EncodeRLE(q,buffer,(literal < 0x80 ? literal : 0x80),0);
(void) CopyMagickMemory(buffer,buffer+literal+repeat,0x80);
literal-=0x80;
}
}
bits=8/(int) bits_per_pixel-1;
buffer[literal+repeat]=0x00;
}
}
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
q=EncodeRLE(q,buffer,literal,repeat);
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
quantum_info=DestroyQuantumInfo(quantum_info);
/*
Write the Image record header.
*/
(void) WriteBlobMSBLong(image,(unsigned int) (TellBlob(image)+8*
pdb_info.number_records));
(void) WriteBlobByte(image,0x40);
(void) WriteBlobByte(image,0x6f);
(void) WriteBlobByte(image,0x80);
(void) WriteBlobByte(image,0);
if (pdb_info.number_records > 1)
{
/*
Write the comment record header.
*/
(void) WriteBlobMSBLong(image,(unsigned int) (TellBlob(image)+8+58+q-
runlength));
(void) WriteBlobByte(image,0x40);
(void) WriteBlobByte(image,0x6f);
(void) WriteBlobByte(image,0x80);
(void) WriteBlobByte(image,1);
}
/*
Write the Image data.
*/
(void) WriteBlob(image,sizeof(pdb_image.name),(unsigned char *)
pdb_image.name);
(void) WriteBlobByte(image,(unsigned char) pdb_image.version);
(void) WriteBlobByte(image,pdb_image.type);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_image.reserved_1);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_image.note);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_image.x_last);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_image.y_last);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_image.reserved_2);
(void) WriteBlobMSBShort(image,pdb_image.x_anchor);
(void) WriteBlobMSBShort(image,pdb_image.y_anchor);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_image.width);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_image.height);
(void) WriteBlob(image,(size_t) (q-runlength),runlength);
runlength=(unsigned char *) RelinquishMagickMemory(runlength);
if (pdb_info.number_records > 1)
(void) WriteBlobString(image,comment);
(void) CloseBlob(image);
return(MagickTrue);
}
Commit Message: https://github.com/ImageMagick/ImageMagick/issues/143
CWE ID: CWE-125 | static MagickBooleanType WritePDBImage(const ImageInfo *image_info,Image *image)
{
const char
*comment;
int
bits;
MagickBooleanType
status;
PDBImage
pdb_image;
PDBInfo
pdb_info;
QuantumInfo
*quantum_info;
register const PixelPacket
*p;
register ssize_t
x;
register unsigned char
*q;
size_t
bits_per_pixel,
literal,
packets,
packet_size,
repeat;
ssize_t
y;
unsigned char
*buffer,
*runlength,
*scanline;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace);
if ((image -> colors <= 2 ) ||
(GetImageType(image,&image->exception ) == BilevelType)) {
bits_per_pixel=1;
} else if (image->colors <= 4) {
bits_per_pixel=2;
} else if (image->colors <= 8) {
bits_per_pixel=3;
} else {
bits_per_pixel=4;
}
(void) ResetMagickMemory(&pdb_info,0,sizeof(pdb_info));
(void) CopyMagickString(pdb_info.name,image_info->filename,
sizeof(pdb_info.name));
pdb_info.attributes=0;
pdb_info.version=0;
pdb_info.create_time=time(NULL);
pdb_info.modify_time=pdb_info.create_time;
pdb_info.archive_time=0;
pdb_info.modify_number=0;
pdb_info.application_info=0;
pdb_info.sort_info=0;
(void) CopyMagickMemory(pdb_info.type,"vIMG",4);
(void) CopyMagickMemory(pdb_info.id,"View",4);
pdb_info.seed=0;
pdb_info.next_record=0;
comment=GetImageProperty(image,"comment");
pdb_info.number_records=(comment == (const char *) NULL ? 1 : 2);
(void) WriteBlob(image,sizeof(pdb_info.name),(unsigned char *) pdb_info.name);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_info.attributes);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_info.version);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.create_time);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.modify_time);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.archive_time);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.modify_number);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.application_info);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.sort_info);
(void) WriteBlob(image,4,(unsigned char *) pdb_info.type);
(void) WriteBlob(image,4,(unsigned char *) pdb_info.id);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.seed);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_info.next_record);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_info.number_records);
(void) CopyMagickString(pdb_image.name,pdb_info.name,sizeof(pdb_image.name));
pdb_image.version=1; /* RLE Compressed */
switch (bits_per_pixel)
{
case 1: pdb_image.type=(unsigned char) 0xff; break; /* monochrome */
case 2: pdb_image.type=(unsigned char) 0x00; break; /* 2 bit gray */
default: pdb_image.type=(unsigned char) 0x02; /* 4 bit gray */
}
pdb_image.reserved_1=0;
pdb_image.note=0;
pdb_image.x_last=0;
pdb_image.y_last=0;
pdb_image.reserved_2=0;
pdb_image.x_anchor=(unsigned short) 0xffff;
pdb_image.y_anchor=(unsigned short) 0xffff;
pdb_image.width=(short) image->columns;
if (image->columns % 16)
pdb_image.width=(short) (16*(image->columns/16+1));
pdb_image.height=(short) image->rows;
packets=((bits_per_pixel*image->columns+7)/8);
runlength=(unsigned char *) AcquireQuantumMemory(9UL*packets,
image->rows*sizeof(*runlength));
if (runlength == (unsigned char *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
buffer=(unsigned char *) AcquireQuantumMemory(257,sizeof(*buffer));
if (buffer == (unsigned char *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
packet_size=(size_t) (image->depth > 8 ? 2: 1);
scanline=(unsigned char *) AcquireQuantumMemory(image->columns,packet_size*
sizeof(*scanline));
if (scanline == (unsigned char *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace);
/*
Convert to GRAY raster scanline.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
bits=8/(int) bits_per_pixel-1; /* start at most significant bits */
literal=0;
repeat=0;
q=runlength;
buffer[0]=0x00;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
(void) ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info,
GrayQuantum,scanline,&image->exception);
for (x=0; x < (ssize_t) pdb_image.width; x++)
{
if (x < (ssize_t) image->columns)
buffer[literal+repeat]|=(0xff-scanline[x*packet_size]) >>
(8-bits_per_pixel) << bits*bits_per_pixel;
bits--;
if (bits < 0)
{
if (((literal+repeat) > 0) &&
(buffer[literal+repeat] == buffer[literal+repeat-1]))
{
if (repeat == 0)
{
literal--;
repeat++;
}
repeat++;
if (0x7f < repeat)
{
q=EncodeRLE(q,buffer,literal,repeat);
literal=0;
repeat=0;
}
}
else
{
if (repeat >= 2)
literal+=repeat;
else
{
q=EncodeRLE(q,buffer,literal,repeat);
buffer[0]=buffer[literal+repeat];
literal=0;
}
literal++;
repeat=0;
if (0x7f < literal)
{
q=EncodeRLE(q,buffer,(literal < 0x80 ? literal : 0x80),0);
(void) CopyMagickMemory(buffer,buffer+literal+repeat,0x80);
literal-=0x80;
}
}
bits=8/(int) bits_per_pixel-1;
buffer[literal+repeat]=0x00;
}
}
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
q=EncodeRLE(q,buffer,literal,repeat);
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
quantum_info=DestroyQuantumInfo(quantum_info);
/*
Write the Image record header.
*/
(void) WriteBlobMSBLong(image,(unsigned int) (TellBlob(image)+8*
pdb_info.number_records));
(void) WriteBlobByte(image,0x40);
(void) WriteBlobByte(image,0x6f);
(void) WriteBlobByte(image,0x80);
(void) WriteBlobByte(image,0);
if (pdb_info.number_records > 1)
{
/*
Write the comment record header.
*/
(void) WriteBlobMSBLong(image,(unsigned int) (TellBlob(image)+8+58+q-
runlength));
(void) WriteBlobByte(image,0x40);
(void) WriteBlobByte(image,0x6f);
(void) WriteBlobByte(image,0x80);
(void) WriteBlobByte(image,1);
}
/*
Write the Image data.
*/
(void) WriteBlob(image,sizeof(pdb_image.name),(unsigned char *)
pdb_image.name);
(void) WriteBlobByte(image,(unsigned char) pdb_image.version);
(void) WriteBlobByte(image,pdb_image.type);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_image.reserved_1);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_image.note);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_image.x_last);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_image.y_last);
(void) WriteBlobMSBLong(image,(unsigned int) pdb_image.reserved_2);
(void) WriteBlobMSBShort(image,pdb_image.x_anchor);
(void) WriteBlobMSBShort(image,pdb_image.y_anchor);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_image.width);
(void) WriteBlobMSBShort(image,(unsigned short) pdb_image.height);
(void) WriteBlob(image,(size_t) (q-runlength),runlength);
runlength=(unsigned char *) RelinquishMagickMemory(runlength);
if (pdb_info.number_records > 1)
(void) WriteBlobString(image,comment);
(void) CloseBlob(image);
return(MagickTrue);
}
| 168,791 |
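The only functional change on this writer's path is the work buffer growing from 256 to 257 bytes: the RLE loop lets literal and repeat each momentarily reach 0x80 before flushing, so buffer[literal+repeat] can touch index 256 and the 256-byte allocation was one short — the out-of-bounds access this CWE-125 entry records. The arithmetic in isolation:

#include <cstdlib>
#include <cstring>

int main() {
  const std::size_t kMaxLiteral = 128, kMaxRepeat = 128;  // 0x80 each
  // Indices run 0..kMaxLiteral+kMaxRepeat inclusive, i.e. 0..256, so the
  // allocation needs 257 slots; malloc(256) leaves buffer[256] out of bounds.
  unsigned char* buffer = static_cast<unsigned char*>(
      std::malloc(kMaxLiteral + kMaxRepeat + 1));  // the fixed sizing
  if (!buffer) return 1;
  std::memset(buffer, 0, kMaxLiteral + kMaxRepeat + 1);
  std::size_t literal = kMaxLiteral, repeat = kMaxRepeat;
  buffer[literal + repeat] = 0x00;  // index 256: in bounds only with the +1
  std::free(buffer);
  return 0;
}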
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: updateDevice(const struct header * headers, time_t t)
{
struct device ** pp = &devlist;
struct device * p = *pp; /* = devlist; */
while(p)
{
if( p->headers[HEADER_NT].l == headers[HEADER_NT].l
&& (0==memcmp(p->headers[HEADER_NT].p, headers[HEADER_NT].p, headers[HEADER_NT].l))
&& p->headers[HEADER_USN].l == headers[HEADER_USN].l
&& (0==memcmp(p->headers[HEADER_USN].p, headers[HEADER_USN].p, headers[HEADER_USN].l)) )
{
/*printf("found! %d\n", (int)(t - p->t));*/
syslog(LOG_DEBUG, "device updated : %.*s", headers[HEADER_USN].l, headers[HEADER_USN].p);
p->t = t;
/* update Location ! */
if(headers[HEADER_LOCATION].l > p->headers[HEADER_LOCATION].l)
{
struct device * tmp;
tmp = realloc(p, sizeof(struct device)
+ headers[0].l+headers[1].l+headers[2].l);
if(!tmp) /* allocation error */
{
syslog(LOG_ERR, "updateDevice() : memory allocation error");
free(p);
return 0;
}
p = tmp;
*pp = p;
}
memcpy(p->data + p->headers[0].l + p->headers[1].l,
headers[2].p, headers[2].l);
/* TODO : check p->headers[HEADER_LOCATION].l */
return 0;
}
pp = &p->next;
p = *pp; /* p = p->next; */
}
syslog(LOG_INFO, "new device discovered : %.*s",
headers[HEADER_USN].l, headers[HEADER_USN].p);
/* add */
{
char * pc;
int i;
p = malloc( sizeof(struct device)
+ headers[0].l+headers[1].l+headers[2].l );
if(!p) {
syslog(LOG_ERR, "updateDevice(): cannot allocate memory");
return -1;
}
p->next = devlist;
p->t = t;
pc = p->data;
for(i = 0; i < 3; i++)
{
p->headers[i].p = pc;
p->headers[i].l = headers[i].l;
memcpy(pc, headers[i].p, headers[i].l);
pc += headers[i].l;
}
devlist = p;
sendNotifications(NOTIF_NEW, p, NULL);
}
return 1;
}
Commit Message: updateDevice() remove element from the list when realloc fails
CWE ID: CWE-416 | updateDevice(const struct header * headers, time_t t)
{
struct device ** pp = &devlist;
struct device * p = *pp; /* = devlist; */
while(p)
{
if( p->headers[HEADER_NT].l == headers[HEADER_NT].l
&& (0==memcmp(p->headers[HEADER_NT].p, headers[HEADER_NT].p, headers[HEADER_NT].l))
&& p->headers[HEADER_USN].l == headers[HEADER_USN].l
&& (0==memcmp(p->headers[HEADER_USN].p, headers[HEADER_USN].p, headers[HEADER_USN].l)) )
{
/*printf("found! %d\n", (int)(t - p->t));*/
syslog(LOG_DEBUG, "device updated : %.*s", headers[HEADER_USN].l, headers[HEADER_USN].p);
p->t = t;
/* update Location ! */
if(headers[HEADER_LOCATION].l > p->headers[HEADER_LOCATION].l)
{
struct device * tmp;
tmp = realloc(p, sizeof(struct device)
+ headers[0].l+headers[1].l+headers[2].l);
if(!tmp) /* allocation error */
{
syslog(LOG_ERR, "updateDevice() : memory allocation error");
*pp = p->next; /* remove "p" from the list */
free(p);
return 0;
}
p = tmp;
*pp = p;
}
memcpy(p->data + p->headers[0].l + p->headers[1].l,
headers[2].p, headers[2].l);
/* TODO : check p->headers[HEADER_LOCATION].l */
return 0;
}
pp = &p->next;
p = *pp; /* p = p->next; */
}
syslog(LOG_INFO, "new device discovered : %.*s",
headers[HEADER_USN].l, headers[HEADER_USN].p);
/* add */
{
char * pc;
int i;
p = malloc( sizeof(struct device)
+ headers[0].l+headers[1].l+headers[2].l );
if(!p) {
syslog(LOG_ERR, "updateDevice(): cannot allocate memory");
return -1;
}
p->next = devlist;
p->t = t;
pc = p->data;
for(i = 0; i < 3; i++)
{
p->headers[i].p = pc;
p->headers[i].l = headers[i].l;
memcpy(pc, headers[i].p, headers[i].l);
pc += headers[i].l;
}
devlist = p;
sendNotifications(NOTIF_NEW, p, NULL);
}
return 1;
}
| 169,669 |
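realloc() returning NULL leaves the original block allocated and, here, still linked into devlist; the vulnerable code freed it without unlinking, so the next traversal walked freed memory (the CWE-416). The fixed ordering — unlink through the back-pointer, then free — generalized to any singly linked list node grown in place:

#include <cstdlib>

struct node {
  node* next;
  std::size_t len;
  char data[1];          // grows via over-allocation, like struct device
};

// Grow *pp in place; on realloc failure unlink the node before freeing it
// so the list never retains a pointer to freed memory.
int grow_node(node** pp, std::size_t new_len) {
  node* p = *pp;
  node* tmp = static_cast<node*>(std::realloc(p, sizeof(node) + new_len));
  if (!tmp) {
    *pp = p->next;       // unlink first -- this line is the fix
    std::free(p);        // the old pointer is still valid to free
    return -1;
  }
  tmp->len = new_len;
  *pp = tmp;             // realloc may have moved the block
  return 0;
}

int main() {
  node* head = static_cast<node*>(std::calloc(1, sizeof(node) + 8));
  if (!head) return 1;   // calloc zeroed next, so the list is terminated
  grow_node(&head, 64);  // on the failure path, head now skips the freed node
  while (head) { node* n = head->next; std::free(head); head = n; }
  return 0;
}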
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
{
int rc;
unsigned char key2[8];
struct crypto_skcipher *tfm_des;
struct scatterlist sgin, sgout;
struct skcipher_request *req;
str_to_key(key, key2);
tfm_des = crypto_alloc_skcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm_des)) {
rc = PTR_ERR(tfm_des);
cifs_dbg(VFS, "could not allocate des crypto API\n");
goto smbhash_err;
}
req = skcipher_request_alloc(tfm_des, GFP_KERNEL);
if (!req) {
rc = -ENOMEM;
cifs_dbg(VFS, "could not allocate des crypto API\n");
goto smbhash_free_skcipher;
}
crypto_skcipher_setkey(tfm_des, key2, 8);
sg_init_one(&sgin, in, 8);
sg_init_one(&sgout, out, 8);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sgin, &sgout, 8, NULL);
rc = crypto_skcipher_encrypt(req);
if (rc)
cifs_dbg(VFS, "could not encrypt crypt key rc: %d\n", rc);
skcipher_request_free(req);
smbhash_free_skcipher:
crypto_free_skcipher(tfm_des);
smbhash_err:
return rc;
}
Commit Message: cifs: Fix smbencrypt() to stop pointing a scatterlist at the stack
smbencrypt() points a scatterlist to the stack, which is breaks if
CONFIG_VMAP_STACK=y.
Fix it by switching to crypto_cipher_encrypt_one(). The new code
should be considerably faster as an added benefit.
This code is nearly identical to some code that Eric Biggers
suggested.
Cc: [email protected] # 4.9 only
Reported-by: Eric Biggers <[email protected]>
Signed-off-by: Andy Lutomirski <[email protected]>
Acked-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]>
CWE ID: CWE-119 | smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
{
unsigned char key2[8];
struct crypto_cipher *tfm_des;
str_to_key(key, key2);
tfm_des = crypto_alloc_cipher("des", 0, 0);
if (IS_ERR(tfm_des)) {
cifs_dbg(VFS, "could not allocate des crypto API\n");
return PTR_ERR(tfm_des);
}
crypto_cipher_setkey(tfm_des, key2, 8);
crypto_cipher_encrypt_one(tfm_des, out, in);
crypto_free_cipher(tfm_des);
return 0;
}
| 168,518 |
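sg_init_one() on in and out — stack buffers in the callers — produces scatterlist entries the kernel cannot translate once CONFIG_VMAP_STACK moves stacks into vmalloc space; the fix drops scatterlists entirely in favor of the synchronous crypto_cipher_encrypt_one(). A userspace sketch of the fixed shape — the cipher below is a toy XOR stand-in, not DES, and the only point is that plain linear buffers are safe because nothing tries to map their addresses:

#include <array>
#include <cstdint>

using Block = std::array<std::uint8_t, 8>;

// Placeholder single-block primitive (NOT a real cipher).
Block toy_encrypt_block(const Block& key, const Block& in) {
  Block out{};
  for (int i = 0; i < 8; ++i) out[i] = in[i] ^ key[i];
  return out;
}

// crypto_cipher_encrypt_one() analogue: plain buffers in, plain buffer out;
// stack storage is fine since no scatter/gather translation is involved.
Block smbhash_like(const Block& key, const Block& in) {
  return toy_encrypt_block(key, in);
}

int main() {
  Block key{}, in{};
  Block out = smbhash_like(key, in);  // buffers live on the stack, safely
  return out[0];
}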
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: main(int argc, char *argv[])
{
static char buff[16384];
struct cpio _cpio; /* Allocated on stack. */
struct cpio *cpio;
const char *errmsg;
int uid, gid;
int opt;
cpio = &_cpio;
memset(cpio, 0, sizeof(*cpio));
cpio->buff = buff;
cpio->buff_size = sizeof(buff);
#if defined(HAVE_SIGACTION) && defined(SIGPIPE)
{ /* Ignore SIGPIPE signals. */
struct sigaction sa;
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
sa.sa_handler = SIG_IGN;
sigaction(SIGPIPE, &sa, NULL);
}
#endif
/* Set lafe_progname before calling lafe_warnc. */
lafe_setprogname(*argv, "bsdcpio");
#if HAVE_SETLOCALE
if (setlocale(LC_ALL, "") == NULL)
lafe_warnc(0, "Failed to set default locale");
#endif
cpio->uid_override = -1;
cpio->gid_override = -1;
cpio->argv = argv;
cpio->argc = argc;
cpio->mode = '\0';
cpio->verbose = 0;
cpio->compress = '\0';
cpio->extract_flags = ARCHIVE_EXTRACT_NO_AUTODIR;
cpio->extract_flags |= ARCHIVE_EXTRACT_NO_OVERWRITE_NEWER;
cpio->extract_flags |= ARCHIVE_EXTRACT_SECURE_SYMLINKS;
cpio->extract_flags |= ARCHIVE_EXTRACT_SECURE_NODOTDOT;
cpio->extract_flags |= ARCHIVE_EXTRACT_PERM;
cpio->extract_flags |= ARCHIVE_EXTRACT_FFLAGS;
cpio->extract_flags |= ARCHIVE_EXTRACT_ACL;
#if !defined(_WIN32) && !defined(__CYGWIN__)
if (geteuid() == 0)
cpio->extract_flags |= ARCHIVE_EXTRACT_OWNER;
#endif
cpio->bytes_per_block = 512;
cpio->filename = NULL;
cpio->matching = archive_match_new();
if (cpio->matching == NULL)
lafe_errc(1, 0, "Out of memory");
while ((opt = cpio_getopt(cpio)) != -1) {
switch (opt) {
case '0': /* GNU convention: --null, -0 */
cpio->option_null = 1;
break;
case 'A': /* NetBSD/OpenBSD */
cpio->option_append = 1;
break;
case 'a': /* POSIX 1997 */
cpio->option_atime_restore = 1;
break;
case 'B': /* POSIX 1997 */
cpio->bytes_per_block = 5120;
break;
case OPTION_B64ENCODE:
cpio->add_filter = opt;
break;
case 'C': /* NetBSD/OpenBSD */
cpio->bytes_per_block = atoi(cpio->argument);
if (cpio->bytes_per_block <= 0)
lafe_errc(1, 0, "Invalid blocksize %s", cpio->argument);
break;
case 'c': /* POSIX 1997 */
cpio->format = "odc";
break;
case 'd': /* POSIX 1997 */
cpio->extract_flags &= ~ARCHIVE_EXTRACT_NO_AUTODIR;
break;
case 'E': /* NetBSD/OpenBSD */
if (archive_match_include_pattern_from_file(
cpio->matching, cpio->argument,
cpio->option_null) != ARCHIVE_OK)
lafe_errc(1, 0, "Error : %s",
archive_error_string(cpio->matching));
break;
case 'F': /* NetBSD/OpenBSD/GNU cpio */
cpio->filename = cpio->argument;
break;
case 'f': /* POSIX 1997 */
if (archive_match_exclude_pattern(cpio->matching,
cpio->argument) != ARCHIVE_OK)
lafe_errc(1, 0, "Error : %s",
archive_error_string(cpio->matching));
break;
case OPTION_GRZIP:
cpio->compress = opt;
break;
case 'H': /* GNU cpio (also --format) */
cpio->format = cpio->argument;
break;
case 'h':
long_help();
break;
case 'I': /* NetBSD/OpenBSD */
cpio->filename = cpio->argument;
break;
case 'i': /* POSIX 1997 */
if (cpio->mode != '\0')
lafe_errc(1, 0,
"Cannot use both -i and -%c", cpio->mode);
cpio->mode = opt;
break;
case 'J': /* GNU tar, others */
cpio->compress = opt;
break;
case 'j': /* GNU tar, others */
cpio->compress = opt;
break;
case OPTION_INSECURE:
cpio->extract_flags &= ~ARCHIVE_EXTRACT_SECURE_SYMLINKS;
cpio->extract_flags &= ~ARCHIVE_EXTRACT_SECURE_NODOTDOT;
break;
case 'L': /* GNU cpio */
cpio->option_follow_links = 1;
break;
case 'l': /* POSIX 1997 */
cpio->option_link = 1;
break;
case OPTION_LRZIP:
case OPTION_LZ4:
case OPTION_LZMA: /* GNU tar, others */
case OPTION_LZOP: /* GNU tar, others */
cpio->compress = opt;
break;
case 'm': /* POSIX 1997 */
cpio->extract_flags |= ARCHIVE_EXTRACT_TIME;
break;
case 'n': /* GNU cpio */
cpio->option_numeric_uid_gid = 1;
break;
case OPTION_NO_PRESERVE_OWNER: /* GNU cpio */
cpio->extract_flags &= ~ARCHIVE_EXTRACT_OWNER;
break;
case 'O': /* GNU cpio */
cpio->filename = cpio->argument;
break;
case 'o': /* POSIX 1997 */
if (cpio->mode != '\0')
lafe_errc(1, 0,
"Cannot use both -o and -%c", cpio->mode);
cpio->mode = opt;
break;
case 'p': /* POSIX 1997 */
if (cpio->mode != '\0')
lafe_errc(1, 0,
"Cannot use both -p and -%c", cpio->mode);
cpio->mode = opt;
cpio->extract_flags &= ~ARCHIVE_EXTRACT_SECURE_NODOTDOT;
break;
case OPTION_PASSPHRASE:
cpio->passphrase = cpio->argument;
break;
case OPTION_PRESERVE_OWNER:
cpio->extract_flags |= ARCHIVE_EXTRACT_OWNER;
break;
case OPTION_QUIET: /* GNU cpio */
cpio->quiet = 1;
break;
case 'R': /* GNU cpio, also --owner */
/* TODO: owner_parse should return uname/gname
* also; use that to set [ug]name_override. */
errmsg = owner_parse(cpio->argument, &uid, &gid);
if (errmsg) {
lafe_warnc(-1, "%s", errmsg);
usage();
}
if (uid != -1) {
cpio->uid_override = uid;
cpio->uname_override = NULL;
}
if (gid != -1) {
cpio->gid_override = gid;
cpio->gname_override = NULL;
}
break;
case 'r': /* POSIX 1997 */
cpio->option_rename = 1;
break;
case 't': /* POSIX 1997 */
cpio->option_list = 1;
break;
case 'u': /* POSIX 1997 */
cpio->extract_flags
&= ~ARCHIVE_EXTRACT_NO_OVERWRITE_NEWER;
break;
case OPTION_UUENCODE:
cpio->add_filter = opt;
break;
case 'v': /* POSIX 1997 */
cpio->verbose++;
break;
case 'V': /* GNU cpio */
cpio->dot++;
break;
case OPTION_VERSION: /* GNU convention */
version();
break;
#if 0
/*
* cpio_getopt() handles -W specially, so it's not
* available here.
*/
case 'W': /* Obscure, but useful GNU convention. */
break;
#endif
case 'y': /* tar convention */
cpio->compress = opt;
break;
case 'Z': /* tar convention */
cpio->compress = opt;
break;
case 'z': /* tar convention */
cpio->compress = opt;
break;
default:
usage();
}
}
/*
* Sanity-check args, error out on nonsensical combinations.
*/
/* -t implies -i if no mode was specified. */
if (cpio->option_list && cpio->mode == '\0')
cpio->mode = 'i';
/* -t requires -i */
if (cpio->option_list && cpio->mode != 'i')
lafe_errc(1, 0, "Option -t requires -i");
/* -n requires -it */
if (cpio->option_numeric_uid_gid && !cpio->option_list)
lafe_errc(1, 0, "Option -n requires -it");
/* Can only specify format when writing */
if (cpio->format != NULL && cpio->mode != 'o')
lafe_errc(1, 0, "Option --format requires -o");
/* -l requires -p */
if (cpio->option_link && cpio->mode != 'p')
lafe_errc(1, 0, "Option -l requires -p");
/* -v overrides -V */
if (cpio->dot && cpio->verbose)
cpio->dot = 0;
/* TODO: Flag other nonsensical combinations. */
switch (cpio->mode) {
case 'o':
/* TODO: Implement old binary format in libarchive,
use that here. */
if (cpio->format == NULL)
cpio->format = "odc"; /* Default format */
mode_out(cpio);
break;
case 'i':
while (*cpio->argv != NULL) {
if (archive_match_include_pattern(cpio->matching,
*cpio->argv) != ARCHIVE_OK)
lafe_errc(1, 0, "Error : %s",
archive_error_string(cpio->matching));
--cpio->argc;
++cpio->argv;
}
if (cpio->option_list)
mode_list(cpio);
else
mode_in(cpio);
break;
case 'p':
if (*cpio->argv == NULL || **cpio->argv == '\0')
lafe_errc(1, 0,
"-p mode requires a target directory");
mode_pass(cpio, *cpio->argv);
break;
default:
lafe_errc(1, 0,
"Must specify at least one of -i, -o, or -p");
}
archive_match_free(cpio->matching);
free_cache(cpio->gname_cache);
free_cache(cpio->uname_cache);
free(cpio->destdir);
passphrase_free(cpio->ppbuff);
return (cpio->return_value);
}
Commit Message: Add ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS option
This fixes a directory traversal in the cpio tool.
CWE ID: CWE-22 | main(int argc, char *argv[])
{
static char buff[16384];
struct cpio _cpio; /* Allocated on stack. */
struct cpio *cpio;
const char *errmsg;
int uid, gid;
int opt;
cpio = &_cpio;
memset(cpio, 0, sizeof(*cpio));
cpio->buff = buff;
cpio->buff_size = sizeof(buff);
#if defined(HAVE_SIGACTION) && defined(SIGPIPE)
{ /* Ignore SIGPIPE signals. */
struct sigaction sa;
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
sa.sa_handler = SIG_IGN;
sigaction(SIGPIPE, &sa, NULL);
}
#endif
/* Set lafe_progname before calling lafe_warnc. */
lafe_setprogname(*argv, "bsdcpio");
#if HAVE_SETLOCALE
if (setlocale(LC_ALL, "") == NULL)
lafe_warnc(0, "Failed to set default locale");
#endif
cpio->uid_override = -1;
cpio->gid_override = -1;
cpio->argv = argv;
cpio->argc = argc;
cpio->mode = '\0';
cpio->verbose = 0;
cpio->compress = '\0';
cpio->extract_flags = ARCHIVE_EXTRACT_NO_AUTODIR;
cpio->extract_flags |= ARCHIVE_EXTRACT_NO_OVERWRITE_NEWER;
cpio->extract_flags |= ARCHIVE_EXTRACT_SECURE_SYMLINKS;
cpio->extract_flags |= ARCHIVE_EXTRACT_SECURE_NODOTDOT;
cpio->extract_flags |= ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS;
cpio->extract_flags |= ARCHIVE_EXTRACT_PERM;
cpio->extract_flags |= ARCHIVE_EXTRACT_FFLAGS;
cpio->extract_flags |= ARCHIVE_EXTRACT_ACL;
#if !defined(_WIN32) && !defined(__CYGWIN__)
if (geteuid() == 0)
cpio->extract_flags |= ARCHIVE_EXTRACT_OWNER;
#endif
cpio->bytes_per_block = 512;
cpio->filename = NULL;
cpio->matching = archive_match_new();
if (cpio->matching == NULL)
lafe_errc(1, 0, "Out of memory");
while ((opt = cpio_getopt(cpio)) != -1) {
switch (opt) {
case '0': /* GNU convention: --null, -0 */
cpio->option_null = 1;
break;
case 'A': /* NetBSD/OpenBSD */
cpio->option_append = 1;
break;
case 'a': /* POSIX 1997 */
cpio->option_atime_restore = 1;
break;
case 'B': /* POSIX 1997 */
cpio->bytes_per_block = 5120;
break;
case OPTION_B64ENCODE:
cpio->add_filter = opt;
break;
case 'C': /* NetBSD/OpenBSD */
cpio->bytes_per_block = atoi(cpio->argument);
if (cpio->bytes_per_block <= 0)
lafe_errc(1, 0, "Invalid blocksize %s", cpio->argument);
break;
case 'c': /* POSIX 1997 */
cpio->format = "odc";
break;
case 'd': /* POSIX 1997 */
cpio->extract_flags &= ~ARCHIVE_EXTRACT_NO_AUTODIR;
break;
case 'E': /* NetBSD/OpenBSD */
if (archive_match_include_pattern_from_file(
cpio->matching, cpio->argument,
cpio->option_null) != ARCHIVE_OK)
lafe_errc(1, 0, "Error : %s",
archive_error_string(cpio->matching));
break;
case 'F': /* NetBSD/OpenBSD/GNU cpio */
cpio->filename = cpio->argument;
break;
case 'f': /* POSIX 1997 */
if (archive_match_exclude_pattern(cpio->matching,
cpio->argument) != ARCHIVE_OK)
lafe_errc(1, 0, "Error : %s",
archive_error_string(cpio->matching));
break;
case OPTION_GRZIP:
cpio->compress = opt;
break;
case 'H': /* GNU cpio (also --format) */
cpio->format = cpio->argument;
break;
case 'h':
long_help();
break;
case 'I': /* NetBSD/OpenBSD */
cpio->filename = cpio->argument;
break;
case 'i': /* POSIX 1997 */
if (cpio->mode != '\0')
lafe_errc(1, 0,
"Cannot use both -i and -%c", cpio->mode);
cpio->mode = opt;
break;
case 'J': /* GNU tar, others */
cpio->compress = opt;
break;
case 'j': /* GNU tar, others */
cpio->compress = opt;
break;
case OPTION_INSECURE:
cpio->extract_flags &= ~ARCHIVE_EXTRACT_SECURE_SYMLINKS;
cpio->extract_flags &= ~ARCHIVE_EXTRACT_SECURE_NODOTDOT;
cpio->extract_flags &= ~ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS;
break;
case 'L': /* GNU cpio */
cpio->option_follow_links = 1;
break;
case 'l': /* POSIX 1997 */
cpio->option_link = 1;
break;
case OPTION_LRZIP:
case OPTION_LZ4:
case OPTION_LZMA: /* GNU tar, others */
case OPTION_LZOP: /* GNU tar, others */
cpio->compress = opt;
break;
case 'm': /* POSIX 1997 */
cpio->extract_flags |= ARCHIVE_EXTRACT_TIME;
break;
case 'n': /* GNU cpio */
cpio->option_numeric_uid_gid = 1;
break;
case OPTION_NO_PRESERVE_OWNER: /* GNU cpio */
cpio->extract_flags &= ~ARCHIVE_EXTRACT_OWNER;
break;
case 'O': /* GNU cpio */
cpio->filename = cpio->argument;
break;
case 'o': /* POSIX 1997 */
if (cpio->mode != '\0')
lafe_errc(1, 0,
"Cannot use both -o and -%c", cpio->mode);
cpio->mode = opt;
break;
case 'p': /* POSIX 1997 */
if (cpio->mode != '\0')
lafe_errc(1, 0,
"Cannot use both -p and -%c", cpio->mode);
cpio->mode = opt;
cpio->extract_flags &= ~ARCHIVE_EXTRACT_SECURE_NODOTDOT;
break;
case OPTION_PASSPHRASE:
cpio->passphrase = cpio->argument;
break;
case OPTION_PRESERVE_OWNER:
cpio->extract_flags |= ARCHIVE_EXTRACT_OWNER;
break;
case OPTION_QUIET: /* GNU cpio */
cpio->quiet = 1;
break;
case 'R': /* GNU cpio, also --owner */
/* TODO: owner_parse should return uname/gname
* also; use that to set [ug]name_override. */
errmsg = owner_parse(cpio->argument, &uid, &gid);
if (errmsg) {
lafe_warnc(-1, "%s", errmsg);
usage();
}
if (uid != -1) {
cpio->uid_override = uid;
cpio->uname_override = NULL;
}
if (gid != -1) {
cpio->gid_override = gid;
cpio->gname_override = NULL;
}
break;
case 'r': /* POSIX 1997 */
cpio->option_rename = 1;
break;
case 't': /* POSIX 1997 */
cpio->option_list = 1;
break;
case 'u': /* POSIX 1997 */
cpio->extract_flags
&= ~ARCHIVE_EXTRACT_NO_OVERWRITE_NEWER;
break;
case OPTION_UUENCODE:
cpio->add_filter = opt;
break;
case 'v': /* POSIX 1997 */
cpio->verbose++;
break;
case 'V': /* GNU cpio */
cpio->dot++;
break;
case OPTION_VERSION: /* GNU convention */
version();
break;
#if 0
/*
* cpio_getopt() handles -W specially, so it's not
* available here.
*/
case 'W': /* Obscure, but useful GNU convention. */
break;
#endif
case 'y': /* tar convention */
cpio->compress = opt;
break;
case 'Z': /* tar convention */
cpio->compress = opt;
break;
case 'z': /* tar convention */
cpio->compress = opt;
break;
default:
usage();
}
}
/*
* Sanity-check args, error out on nonsensical combinations.
*/
/* -t implies -i if no mode was specified. */
if (cpio->option_list && cpio->mode == '\0')
cpio->mode = 'i';
/* -t requires -i */
if (cpio->option_list && cpio->mode != 'i')
lafe_errc(1, 0, "Option -t requires -i");
/* -n requires -it */
if (cpio->option_numeric_uid_gid && !cpio->option_list)
lafe_errc(1, 0, "Option -n requires -it");
/* Can only specify format when writing */
if (cpio->format != NULL && cpio->mode != 'o')
lafe_errc(1, 0, "Option --format requires -o");
/* -l requires -p */
if (cpio->option_link && cpio->mode != 'p')
lafe_errc(1, 0, "Option -l requires -p");
/* -v overrides -V */
if (cpio->dot && cpio->verbose)
cpio->dot = 0;
/* TODO: Flag other nonsensical combinations. */
switch (cpio->mode) {
case 'o':
/* TODO: Implement old binary format in libarchive,
use that here. */
if (cpio->format == NULL)
cpio->format = "odc"; /* Default format */
mode_out(cpio);
break;
case 'i':
while (*cpio->argv != NULL) {
if (archive_match_include_pattern(cpio->matching,
*cpio->argv) != ARCHIVE_OK)
lafe_errc(1, 0, "Error : %s",
archive_error_string(cpio->matching));
--cpio->argc;
++cpio->argv;
}
if (cpio->option_list)
mode_list(cpio);
else
mode_in(cpio);
break;
case 'p':
if (*cpio->argv == NULL || **cpio->argv == '\0')
lafe_errc(1, 0,
"-p mode requires a target directory");
mode_pass(cpio, *cpio->argv);
break;
default:
lafe_errc(1, 0,
"Must specify at least one of -i, -o, or -p");
}
archive_match_free(cpio->matching);
free_cache(cpio->gname_cache);
free_cache(cpio->uname_cache);
free(cpio->destdir);
passphrase_free(cpio->ppbuff);
return (cpio->return_value);
}
| 166,680 |
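The new ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS flag pushes the actual path check into libarchive's extraction layer. A minimal sketch of the kind of pathname screening that flag implies is below; the helper name and exact rules are illustrative assumptions, not libarchive's real implementation.

#include <stdio.h>
#include <string.h>

/* Hypothetical helper: reject entry pathnames that could escape the
 * extraction directory. Real libarchive performs equivalent checks
 * inside archive_write_disk when the secure flags are set. */
static int pathname_is_safe(const char *name)
{
    if (name[0] == '/')
        return 0;                      /* absolute path: refuse */
    if (strncmp(name, "../", 3) == 0)
        return 0;                      /* leading ..: refuse */
    if (strstr(name, "/../") != NULL)
        return 0;                      /* embedded ..: refuse */
    return 1;
}

int main(void)
{
    const char *samples[] = { "etc/passwd", "/etc/passwd", "a/../../b" };
    for (int i = 0; i < 3; i++)
        printf("%-12s -> %s\n", samples[i],
               pathname_is_safe(samples[i]) ? "extract" : "skip");
    return 0;
}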
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
int op_flag,
unsigned int size,
unsigned int __user *tlv)
{
struct user_element *ue = kcontrol->private_data;
int change = 0;
void *new_data;
if (op_flag > 0) {
if (size > 1024 * 128) /* sane value */
return -EINVAL;
new_data = memdup_user(tlv, size);
if (IS_ERR(new_data))
return PTR_ERR(new_data);
change = ue->tlv_data_size != size;
if (!change)
change = memcmp(ue->tlv_data, new_data, size);
kfree(ue->tlv_data);
ue->tlv_data = new_data;
ue->tlv_data_size = size;
} else {
if (! ue->tlv_data_size || ! ue->tlv_data)
return -ENXIO;
if (size < ue->tlv_data_size)
return -ENOSPC;
if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
return -EFAULT;
}
return change;
}
Commit Message: ALSA: control: Protect user controls against concurrent access
The user-control put and get handlers as well as the tlv do not protect against
concurrent access from multiple threads. Since the state of the control is not
updated atomically it is possible that either two write operations or a write
and a read operation race against each other. Both can lead to arbitrary memory
disclosure. This patch introduces a new lock that protects user-controls from
concurrent access. Since applications typically access controls sequentially
than in parallel a single lock per card should be fine.
Signed-off-by: Lars-Peter Clausen <[email protected]>
Acked-by: Jaroslav Kysela <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
CWE ID: CWE-362 | static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
int op_flag,
unsigned int size,
unsigned int __user *tlv)
{
struct user_element *ue = kcontrol->private_data;
int change = 0;
void *new_data;
if (op_flag > 0) {
if (size > 1024 * 128) /* sane value */
return -EINVAL;
new_data = memdup_user(tlv, size);
if (IS_ERR(new_data))
return PTR_ERR(new_data);
mutex_lock(&ue->card->user_ctl_lock);
change = ue->tlv_data_size != size;
if (!change)
change = memcmp(ue->tlv_data, new_data, size);
kfree(ue->tlv_data);
ue->tlv_data = new_data;
ue->tlv_data_size = size;
mutex_unlock(&ue->card->user_ctl_lock);
} else {
int ret = 0;
mutex_lock(&ue->card->user_ctl_lock);
if (!ue->tlv_data_size || !ue->tlv_data) {
ret = -ENXIO;
goto err_unlock;
}
if (size < ue->tlv_data_size) {
ret = -ENOSPC;
goto err_unlock;
}
if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
ret = -EFAULT;
err_unlock:
mutex_unlock(&ue->card->user_ctl_lock);
if (ret)
return ret;
}
return change;
}
| 166,299 |
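The fixed handler serializes every non-atomic read-modify-write of the shared TLV payload behind card->user_ctl_lock. A stripped-down sketch of the same discipline, assuming pthreads and invented structure names in place of the ALSA types:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct elem {
    pthread_mutex_t lock;   /* stands in for card->user_ctl_lock */
    void  *data;
    size_t size;
};

/* Replace the payload under the lock so concurrent readers can never
 * observe a half-updated (pointer, size) pair. */
int elem_write(struct elem *e, const void *buf, size_t len)
{
    void *copy = malloc(len);
    if (!copy)
        return -1;
    memcpy(copy, buf, len);

    pthread_mutex_lock(&e->lock);
    free(e->data);
    e->data = copy;
    e->size = len;
    pthread_mutex_unlock(&e->lock);
    return 0;
}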
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void SkiaOutputSurfaceImpl::Reshape(const gfx::Size& size,
float device_scale_factor,
const gfx::ColorSpace& color_space,
bool has_alpha,
bool use_stencil) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (initialize_waitable_event_) {
initialize_waitable_event_->Wait();
initialize_waitable_event_ = nullptr;
}
SkSurfaceCharacterization* characterization = nullptr;
if (characterization_.isValid()) {
characterization_ =
characterization_.createResized(size.width(), size.height());
RecreateRootRecorder();
} else {
characterization = &characterization_;
initialize_waitable_event_ = std::make_unique<base::WaitableEvent>(
base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED);
}
auto callback = base::BindOnce(
&SkiaOutputSurfaceImplOnGpu::Reshape,
base::Unretained(impl_on_gpu_.get()), size, device_scale_factor,
std::move(color_space), has_alpha, use_stencil, pre_transform_,
characterization, initialize_waitable_event_.get());
ScheduleGpuTask(std::move(callback), std::vector<gpu::SyncToken>());
}
Commit Message: SkiaRenderer: Support changing color space
SkiaOutputSurfaceImpl did not previously handle the color space changing
after it was created. The SkSurfaceCharacterization color space was only
set the first time Reshape() ran, when the characterization is returned
from the GPU thread. If the color space was changed later, the SkSurface
and SkDDL color spaces no longer matched and drawing failed.
Bug: 1009452
Change-Id: Ib6d2083efc7e7eb6f94782342e92a809b69d6fdc
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1841811
Reviewed-by: Peng Huang <[email protected]>
Commit-Queue: kylechar <[email protected]>
Cr-Commit-Position: refs/heads/master@{#702946}
CWE ID: CWE-704 | void SkiaOutputSurfaceImpl::Reshape(const gfx::Size& size,
float device_scale_factor,
const gfx::ColorSpace& color_space,
bool has_alpha,
bool use_stencil) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (initialize_waitable_event_) {
initialize_waitable_event_->Wait();
initialize_waitable_event_.reset();
}
SkSurfaceCharacterization* characterization = nullptr;
if (characterization_.isValid()) {
sk_sp<SkColorSpace> sk_color_space = color_space.ToSkColorSpace();
if (!SkColorSpace::Equals(characterization_.refColorSpace().get(),
sk_color_space.get())) {
characterization_ = characterization_.createColorSpace(sk_color_space);
}
if (size.width() != characterization_.width() ||
size.height() != characterization_.height()) {
characterization_ =
characterization_.createResized(size.width(), size.height());
}
// TODO(kylechar): Update |characterization_| if |use_alpha| changes.
RecreateRootRecorder();
} else {
characterization = &characterization_;
initialize_waitable_event_ = std::make_unique<base::WaitableEvent>(
base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED);
}
auto task = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::Reshape,
base::Unretained(impl_on_gpu_.get()), size,
device_scale_factor, color_space, has_alpha,
use_stencil, pre_transform_, characterization,
initialize_waitable_event_.get());
ScheduleGpuTask(std::move(task), {});
}
| 172,317 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static ssize_t get_node_path_locked(struct node* node, char* buf, size_t bufsize) {
const char* name;
size_t namelen;
if (node->graft_path) {
name = node->graft_path;
namelen = node->graft_pathlen;
} else if (node->actual_name) {
name = node->actual_name;
namelen = node->namelen;
} else {
name = node->name;
namelen = node->namelen;
}
if (bufsize < namelen + 1) {
return -1;
}
ssize_t pathlen = 0;
if (node->parent && node->graft_path == NULL) {
pathlen = get_node_path_locked(node->parent, buf, bufsize - namelen - 2);
if (pathlen < 0) {
return -1;
}
buf[pathlen++] = '/';
}
memcpy(buf + pathlen, name, namelen + 1); /* include trailing \0 */
return pathlen + namelen;
}
Commit Message: Fix overflow in path building
An incorrect size caused an unsigned value to wrap,
making the path builder write past the end of
the buffer.
Bug: 28085658
Change-Id: Ie9625c729cca024d514ba2880ff97209d435a165
CWE ID: CWE-264 | static ssize_t get_node_path_locked(struct node* node, char* buf, size_t bufsize) {
const char* name;
size_t namelen;
if (node->graft_path) {
name = node->graft_path;
namelen = node->graft_pathlen;
} else if (node->actual_name) {
name = node->actual_name;
namelen = node->namelen;
} else {
name = node->name;
namelen = node->namelen;
}
if (bufsize < namelen + 1) {
return -1;
}
ssize_t pathlen = 0;
if (node->parent && node->graft_path == NULL) {
pathlen = get_node_path_locked(node->parent, buf, bufsize - namelen - 1);
if (pathlen < 0) {
return -1;
}
buf[pathlen++] = '/';
}
memcpy(buf + pathlen, name, namelen + 1); /* include trailing \0 */
return pathlen + namelen;
}
| 173,774 |
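The bug is easiest to see with concrete numbers: bufsize is an unsigned size_t, so once namelen + 2 exceeds it, bufsize - namelen - 2 wraps to a huge value and the recursion's own bounds check can never fire. The fix reserves namelen + 1 bytes (the name plus the '/' separator) instead. A self-contained demonstration of the wrap:

#include <stdio.h>

int main(void)
{
    size_t bufsize = 10, namelen = 9;    /* passes "bufsize < namelen + 1" */

    size_t bad  = bufsize - namelen - 2; /* wraps to SIZE_MAX */
    size_t good = bufsize - namelen - 1; /* 0: name plus '/' accounted for */

    printf("bad  = %zu\n", bad);         /* 18446744073709551615 on 64-bit */
    printf("good = %zu\n", good);        /* 0 */
    return 0;
}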
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void StreamTcpPacketSetState(Packet *p, TcpSession *ssn,
uint8_t state)
{
if (state == ssn->state || PKT_IS_PSEUDOPKT(p))
return;
ssn->state = state;
/* update the flow state */
switch(ssn->state) {
case TCP_ESTABLISHED:
case TCP_FIN_WAIT1:
case TCP_FIN_WAIT2:
case TCP_CLOSING:
case TCP_CLOSE_WAIT:
FlowUpdateState(p->flow, FLOW_STATE_ESTABLISHED);
break;
case TCP_LAST_ACK:
case TCP_TIME_WAIT:
case TCP_CLOSED:
FlowUpdateState(p->flow, FLOW_STATE_CLOSED);
break;
}
}
Commit Message: stream: support RST getting lost/ignored
In case of a valid RST on a SYN, the state is switched to 'TCP_CLOSED'.
However, the target of the RST may not have received it, or may not
have accepted it. Also, the RST may have been injected, so the supposed
sender may not actually be aware of the RST that was sent in it's name.
In this case the previous behavior was to switch the state to CLOSED and
accept no further TCP updates or stream reassembly.
This patch changes this. It still switches the state to CLOSED, as this
is by far the most likely to be correct. However, it will reconsider
the state if the receiver continues to talk.
To do this, on each state change the previous state is recorded in
TcpSession::pstate. If a non-RST packet is received after a RST, this
TcpSession::pstate is used to try to continue the conversation.
If the (supposed) sender of the RST is also continuing the conversation
as normal, it's highly likely it didn't send the RST. In this case
a stream event is generated.
Ticket: #2501
Reported-By: Kirill Shipulin
CWE ID: | static void StreamTcpPacketSetState(Packet *p, TcpSession *ssn,
uint8_t state)
{
if (state == ssn->state || PKT_IS_PSEUDOPKT(p))
return;
ssn->pstate = ssn->state;
ssn->state = state;
/* update the flow state */
switch(ssn->state) {
case TCP_ESTABLISHED:
case TCP_FIN_WAIT1:
case TCP_FIN_WAIT2:
case TCP_CLOSING:
case TCP_CLOSE_WAIT:
FlowUpdateState(p->flow, FLOW_STATE_ESTABLISHED);
break;
case TCP_LAST_ACK:
case TCP_TIME_WAIT:
case TCP_CLOSED:
FlowUpdateState(p->flow, FLOW_STATE_CLOSED);
break;
}
}
| 169,115 |
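The whole fix, as applied above, is one line of bookkeeping in the state setter: remember where the session came from before overwriting ssn->state, so later packets can fall back to the pre-RST state. A minimal sketch of that pattern with assumed names:

enum tcp_state { ESTABLISHED, CLOSED };

struct session {
    enum tcp_state state;
    enum tcp_state pstate;   /* previous state, as in TcpSession::pstate */
};

void set_state(struct session *s, enum tcp_state next)
{
    if (next == s->state)
        return;
    s->pstate = s->state;    /* the added bookkeeping */
    s->state  = next;
}

/* If traffic keeps flowing after an RST, a caller can consult s->pstate
 * to resume the conversation instead of staying stuck in CLOSED. */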
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int wdm_post_reset(struct usb_interface *intf)
{
struct wdm_device *desc = wdm_find_device(intf);
int rv;
clear_bit(WDM_RESETTING, &desc->flags);
rv = recover_from_urb_loss(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
return 0;
}
Commit Message: USB: cdc-wdm: fix buffer overflow
The buffer for responses must not overflow.
If an overflow would occur, set a flag, drop the data, and return
an error after user space has read all remaining data.
Signed-off-by: Oliver Neukum <[email protected]>
CC: [email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
CWE ID: CWE-119 | static int wdm_post_reset(struct usb_interface *intf)
{
struct wdm_device *desc = wdm_find_device(intf);
int rv;
clear_bit(WDM_OVERFLOW, &desc->flags);
clear_bit(WDM_RESETTING, &desc->flags);
rv = recover_from_urb_loss(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
return 0;
}
| 166,104 |
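Alongside the buffer check in the read path, the reset hook now clears the latched overflow flag so a device reset returns the interface to a clean state. A sketch of the flag protocol, with invented bit names standing in for desc->flags:

#define F_OVERFLOW  (1UL << 0)
#define F_RESETTING (1UL << 1)

/* Receive path: if a response cannot fit, latch the flag and drop the
 * data instead of writing past the buffer. */
static int on_response(unsigned long *flags, unsigned long len,
                       unsigned long bufsize)
{
    if (len > bufsize) {
        *flags |= F_OVERFLOW;
        return -1;               /* error reported once the reader drains */
    }
    return 0;
}

/* Reset path: the fix adds exactly this kind of clear. */
static void post_reset(unsigned long *flags)
{
    *flags &= ~(F_OVERFLOW | F_RESETTING);
}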
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned int insn = 0;
int si_code, fault_code, fault;
unsigned long address, mm_rss;
fault_code = get_thread_fault_code();
if (notify_page_fault(regs))
return;
si_code = SEGV_MAPERR;
address = current_thread_info()->fault_address;
if ((fault_code & FAULT_CODE_ITLB) &&
(fault_code & FAULT_CODE_DTLB))
BUG();
if (test_thread_flag(TIF_32BIT)) {
if (!(regs->tstate & TSTATE_PRIV)) {
if (unlikely((regs->tpc >> 32) != 0)) {
bogus_32bit_fault_tpc(regs);
goto intr_or_no_mm;
}
}
if (unlikely((address >> 32) != 0)) {
bogus_32bit_fault_address(regs, address);
goto intr_or_no_mm;
}
}
if (regs->tstate & TSTATE_PRIV) {
unsigned long tpc = regs->tpc;
/* Sanity check the PC. */
if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
(tpc >= MODULES_VADDR && tpc < MODULES_END)) {
/* Valid, no problems... */
} else {
bad_kernel_pc(regs, address);
return;
}
}
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
if (!down_read_trylock(&mm->mmap_sem)) {
if ((regs->tstate & TSTATE_PRIV) &&
!search_exception_tables(regs->tpc)) {
insn = get_fault_insn(regs, insn);
goto handle_kernel_fault;
}
down_read(&mm->mmap_sem);
}
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
/* Pure DTLB misses do not tell us whether the fault causing
* load/store/atomic was a write or not, it only says that there
* was no match. So in such a case we (carefully) read the
* instruction to try and figure this out. It's an optimization
* so it's ok if we can't do this.
*
* Special hack, window spill/fill knows the exact fault type.
*/
if (((fault_code &
(FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
(vma->vm_flags & VM_WRITE) != 0) {
insn = get_fault_insn(regs, 0);
if (!insn)
goto continue_fault;
/* All loads, stores and atomics have bits 30 and 31 both set
* in the instruction. Bit 21 is set in all stores, but we
* have to avoid prefetches which also have bit 21 set.
*/
if ((insn & 0xc0200000) == 0xc0200000 &&
(insn & 0x01780000) != 0x01680000) {
/* Don't bother updating thread struct value,
* because update_mmu_cache only cares which tlb
* the access came from.
*/
fault_code |= FAULT_CODE_WRITE;
}
}
continue_fault:
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (!(fault_code & FAULT_CODE_WRITE)) {
/* Non-faulting loads shouldn't expand stack. */
insn = get_fault_insn(regs, insn);
if ((insn & 0xc0800000) == 0xc0800000) {
unsigned char asi;
if (insn & 0x2000)
asi = (regs->tstate >> 24);
else
asi = (insn >> 5);
if ((asi & 0xf2) == 0x82)
goto bad_area;
}
}
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
si_code = SEGV_ACCERR;
/* If we took a ITLB miss on a non-executable page, catch
* that here.
*/
if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
BUG_ON(address != regs->tpc);
BUG_ON(regs->tstate & TSTATE_PRIV);
goto bad_area;
}
if (fault_code & FAULT_CODE_WRITE) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
/* Spitfire has an icache which does not snoop
* processor stores. Later processors do...
*/
if (tlb_type == spitfire &&
(vma->vm_flags & VM_EXEC) != 0 &&
vma->vm_file != NULL)
set_thread_fault_code(fault_code |
FAULT_CODE_BLKCOMMIT);
} else {
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
mm_rss = get_mm_rss(mm);
#ifdef CONFIG_HUGETLB_PAGE
mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
tsb_grow(mm, MM_TSB_BASE, mm_rss);
#ifdef CONFIG_HUGETLB_PAGE
mm_rss = mm->context.huge_pte_count;
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
tsb_grow(mm, MM_TSB_HUGE, mm_rss);
#endif
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
handle_kernel_fault:
do_kernel_fault(regs, si_code, fault_code, insn, address);
return;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
if (!(regs->tstate & TSTATE_PRIV)) {
pagefault_out_of_memory();
return;
}
goto handle_kernel_fault;
intr_or_no_mm:
insn = get_fault_insn(regs, 0);
goto handle_kernel_fault;
do_sigbus:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
/* Kernel mode? Handle exceptions or die */
if (regs->tstate & TSTATE_PRIV)
goto handle_kernel_fault;
}
Commit Message: perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Michael Cree <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Deng-Cheng Zhu <[email protected]>
Cc: Anton Blanchard <[email protected]>
Cc: Eric B Munson <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jason Wessel <[email protected]>
Cc: Don Zickus <[email protected]>
Link: http://lkml.kernel.org/n/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
CWE ID: CWE-399 | asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned int insn = 0;
int si_code, fault_code, fault;
unsigned long address, mm_rss;
fault_code = get_thread_fault_code();
if (notify_page_fault(regs))
return;
si_code = SEGV_MAPERR;
address = current_thread_info()->fault_address;
if ((fault_code & FAULT_CODE_ITLB) &&
(fault_code & FAULT_CODE_DTLB))
BUG();
if (test_thread_flag(TIF_32BIT)) {
if (!(regs->tstate & TSTATE_PRIV)) {
if (unlikely((regs->tpc >> 32) != 0)) {
bogus_32bit_fault_tpc(regs);
goto intr_or_no_mm;
}
}
if (unlikely((address >> 32) != 0)) {
bogus_32bit_fault_address(regs, address);
goto intr_or_no_mm;
}
}
if (regs->tstate & TSTATE_PRIV) {
unsigned long tpc = regs->tpc;
/* Sanity check the PC. */
if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
(tpc >= MODULES_VADDR && tpc < MODULES_END)) {
/* Valid, no problems... */
} else {
bad_kernel_pc(regs, address);
return;
}
}
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (!down_read_trylock(&mm->mmap_sem)) {
if ((regs->tstate & TSTATE_PRIV) &&
!search_exception_tables(regs->tpc)) {
insn = get_fault_insn(regs, insn);
goto handle_kernel_fault;
}
down_read(&mm->mmap_sem);
}
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
/* Pure DTLB misses do not tell us whether the fault causing
* load/store/atomic was a write or not, it only says that there
* was no match. So in such a case we (carefully) read the
* instruction to try and figure this out. It's an optimization
* so it's ok if we can't do this.
*
* Special hack, window spill/fill knows the exact fault type.
*/
if (((fault_code &
(FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
(vma->vm_flags & VM_WRITE) != 0) {
insn = get_fault_insn(regs, 0);
if (!insn)
goto continue_fault;
/* All loads, stores and atomics have bits 30 and 31 both set
* in the instruction. Bit 21 is set in all stores, but we
* have to avoid prefetches which also have bit 21 set.
*/
if ((insn & 0xc0200000) == 0xc0200000 &&
(insn & 0x01780000) != 0x01680000) {
/* Don't bother updating thread struct value,
* because update_mmu_cache only cares which tlb
* the access came from.
*/
fault_code |= FAULT_CODE_WRITE;
}
}
continue_fault:
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (!(fault_code & FAULT_CODE_WRITE)) {
/* Non-faulting loads shouldn't expand stack. */
insn = get_fault_insn(regs, insn);
if ((insn & 0xc0800000) == 0xc0800000) {
unsigned char asi;
if (insn & 0x2000)
asi = (regs->tstate >> 24);
else
asi = (insn >> 5);
if ((asi & 0xf2) == 0x82)
goto bad_area;
}
}
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
si_code = SEGV_ACCERR;
/* If we took a ITLB miss on a non-executable page, catch
* that here.
*/
if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
BUG_ON(address != regs->tpc);
BUG_ON(regs->tstate & TSTATE_PRIV);
goto bad_area;
}
if (fault_code & FAULT_CODE_WRITE) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
/* Spitfire has an icache which does not snoop
* processor stores. Later processors do...
*/
if (tlb_type == spitfire &&
(vma->vm_flags & VM_EXEC) != 0 &&
vma->vm_file != NULL)
set_thread_fault_code(fault_code |
FAULT_CODE_BLKCOMMIT);
} else {
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
up_read(&mm->mmap_sem);
mm_rss = get_mm_rss(mm);
#ifdef CONFIG_HUGETLB_PAGE
mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
tsb_grow(mm, MM_TSB_BASE, mm_rss);
#ifdef CONFIG_HUGETLB_PAGE
mm_rss = mm->context.huge_pte_count;
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
tsb_grow(mm, MM_TSB_HUGE, mm_rss);
#endif
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
handle_kernel_fault:
do_kernel_fault(regs, si_code, fault_code, insn, address);
return;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
if (!(regs->tstate & TSTATE_PRIV)) {
pagefault_out_of_memory();
return;
}
goto handle_kernel_fault;
intr_or_no_mm:
insn = get_fault_insn(regs, 0);
goto handle_kernel_fault;
do_sigbus:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
/* Kernel mode? Handle exceptions or die */
if (regs->tstate & TSTATE_PRIV)
goto handle_kernel_fault;
}
| 165,817 |
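The change to the fault handler is mechanical: every perf_sw_event() call simply drops its nmi argument. A before/after sketch of that kind of signature migration; the prototypes are simplified stand-ins, not the kernel's exact declarations:

struct pt_regs;

/* Before: every caller had to say whether wakeups were safe (nmi). */
void perf_sw_event_old(unsigned int id, unsigned int count, int nmi,
                       struct pt_regs *regs, unsigned long addr);

/* After: the parameter is gone and the fast path loses a conditional;
 * call sites change from (..., 1, 0, regs, address) to
 * (..., 1, regs, address), exactly as in the fixed handler above. */
void perf_sw_event_new(unsigned int id, unsigned int count,
                       struct pt_regs *regs, unsigned long addr);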
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int build_segment_manager(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct f2fs_sm_info *sm_info;
int err;
sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
if (!sm_info)
return -ENOMEM;
/* init sm info */
sbi->sm_info = sm_info;
sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
sm_info->rec_prefree_segments = sm_info->main_segments *
DEF_RECLAIM_PREFREE_SEGMENTS / 100;
if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
if (!test_opt(sbi, LFS))
sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
INIT_LIST_HEAD(&sm_info->sit_entry_set);
if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
err = create_flush_cmd_control(sbi);
if (err)
return err;
}
err = create_discard_cmd_control(sbi);
if (err)
return err;
err = build_sit_info(sbi);
if (err)
return err;
err = build_free_segmap(sbi);
if (err)
return err;
err = build_curseg(sbi);
if (err)
return err;
/* reinit free segmap based on SIT */
build_sit_entries(sbi);
init_free_segmap(sbi);
err = build_dirty_segmap(sbi);
if (err)
return err;
init_min_max_mtime(sbi);
return 0;
}
Commit Message: f2fs: fix a panic caused by NULL flush_cmd_control
Mounting the fs with the noflush_merge option, boot failed on an
illegal address for fcc in f2fs_issue_flush:
if (!test_opt(sbi, FLUSH_MERGE)) {
ret = submit_flush_wait(sbi);
atomic_inc(&fcc->issued_flush); -> Here, fcc illegal
return ret;
}
Signed-off-by: Yunlei He <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
CWE ID: CWE-476 | int build_segment_manager(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct f2fs_sm_info *sm_info;
int err;
sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
if (!sm_info)
return -ENOMEM;
/* init sm info */
sbi->sm_info = sm_info;
sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
sm_info->rec_prefree_segments = sm_info->main_segments *
DEF_RECLAIM_PREFREE_SEGMENTS / 100;
if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
if (!test_opt(sbi, LFS))
sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
INIT_LIST_HEAD(&sm_info->sit_entry_set);
if (!f2fs_readonly(sbi->sb)) {
err = create_flush_cmd_control(sbi);
if (err)
return err;
}
err = create_discard_cmd_control(sbi);
if (err)
return err;
err = build_sit_info(sbi);
if (err)
return err;
err = build_free_segmap(sbi);
if (err)
return err;
err = build_curseg(sbi);
if (err)
return err;
/* reinit free segmap based on SIT */
build_sit_entries(sbi);
init_free_segmap(sbi);
err = build_dirty_segmap(sbi);
if (err)
return err;
init_min_max_mtime(sbi);
return 0;
}
| 169,381 |
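The crash comes from a conditional-allocation mismatch: the flush control block was only created under FLUSH_MERGE, yet f2fs_issue_flush dereferences it unconditionally, so the fix allocates it whenever the filesystem is writable. A minimal reproduction of the shape of the bug and the fix, with invented names:

#include <stdlib.h>

struct flush_ctl { int issued; };

struct sb { int opt_flush_merge; struct flush_ctl *fcc; };

void mount_buggy(struct sb *s)
{
    if (s->opt_flush_merge)                  /* allocation gated on option */
        s->fcc = calloc(1, sizeof(*s->fcc));
}

void mount_fixed(struct sb *s)
{
    s->fcc = calloc(1, sizeof(*s->fcc));     /* allocate regardless */
}

void issue_flush(struct sb *s)
{
    s->fcc->issued++;   /* NULL deref if mounted with noflush_merge */
}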
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void Ins_JMPR( INS_ARG )
{
CUR.IP += (Int)(args[0]);
CUR.step_ins = FALSE;
/*
 * allow for simple cases here by just checking the preceding byte.
 * Fonts with this problem are not uncommon.
 */
CUR.IP -= 1;
}
Commit Message:
CWE ID: CWE-125 | static void Ins_JMPR( INS_ARG )
{
if ( BOUNDS(CUR.IP + args[0], CUR.codeSize ) )
{
CUR.error = TT_Err_Invalid_Reference;
return;
}
CUR.IP += (Int)(args[0]);
CUR.step_ins = FALSE;
/*
 * allow for simple cases here by just checking the preceding byte.
 * Fonts with this problem are not uncommon.
 */
CUR.IP -= 1;
}
| 164,778 |
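The added guard is the standard pattern for interpreter jumps: validate the target against the code size before moving the instruction pointer. A minimal bytecode-interpreter sketch; the field names are assumptions, and FreeType's BOUNDS macro may differ in detail:

#include <stddef.h>

struct interp {
    size_t ip;        /* instruction pointer */
    size_t code_size;
    int    error;
};

/* Relative jump with the bounds check the fix introduces. */
void jmpr(struct interp *it, long offset)
{
    long target = (long)it->ip + offset;
    if (target < 0 || (size_t)target >= it->code_size) {
        it->error = 1;        /* TT_Err_Invalid_Reference in FreeType */
        return;
    }
    it->ip = (size_t)target;
}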
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: TestPaintArtifact& TestPaintArtifact::Chunk(
scoped_refptr<const TransformPaintPropertyNode> transform,
scoped_refptr<const ClipPaintPropertyNode> clip,
scoped_refptr<const EffectPaintPropertyNode> effect) {
return Chunk(NewClient(), transform, clip, effect);
}
Commit Message: Reland "[CI] Make paint property nodes non-ref-counted"
This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7.
Reason for revert: Retry in M69.
Original change's description:
> Revert "[CI] Make paint property nodes non-ref-counted"
>
> This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123.
>
> Reason for revert: Caused bugs found by clusterfuzz
>
> Original change's description:
> > [CI] Make paint property nodes non-ref-counted
> >
> > Now all paint property nodes are owned by ObjectPaintProperties
> > (and LocalFrameView temporarily before removing non-RLS mode).
> > Others just use raw pointers or references.
> >
> > Bug: 833496
> > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae
> > Reviewed-on: https://chromium-review.googlesource.com/1031101
> > Reviewed-by: Tien-Ren Chen <[email protected]>
> > Commit-Queue: Xianzhu Wang <[email protected]>
> > Cr-Commit-Position: refs/heads/master@{#554626}
>
> [email protected],[email protected],[email protected]
>
> Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: 833496,837932,837943
> Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> Reviewed-on: https://chromium-review.googlesource.com/1034292
> Reviewed-by: Xianzhu Wang <[email protected]>
> Commit-Queue: Xianzhu Wang <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#554653}
[email protected],[email protected],[email protected]
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: 833496, 837932, 837943
Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
Reviewed-on: https://chromium-review.googlesource.com/1083491
Commit-Queue: Xianzhu Wang <[email protected]>
Reviewed-by: Xianzhu Wang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#563930}
CWE ID: | TestPaintArtifact& TestPaintArtifact::Chunk(
const TransformPaintPropertyNode& transform,
const ClipPaintPropertyNode& clip,
const EffectPaintPropertyNode& effect) {
return Chunk(NewClient(), transform, clip, effect);
}
| 171,846 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
size_t copied;
struct sk_buff *skb;
int er;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
lock_sock(sk);
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
release_sock(sk);
return er;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (er < 0) {
skb_free_datagram(sk, skb);
release_sock(sk);
return er;
}
if (sax != NULL) {
memset(sax, 0, sizeof(sax));
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);
}
msg->msg_namelen = sizeof(*sax);
skb_free_datagram(sk, skb);
release_sock(sk);
return copied;
}
Commit Message: netrom: fix invalid use of sizeof in nr_recvmsg()
sizeof(), when applied to a pointer-typed expression, gives the size of the
pointer, not that of the pointed-to data.
Introduced by commit 3ce5ef (netrom: fix info leak via msg_name in nr_recvmsg)
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-200 | static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
size_t copied;
struct sk_buff *skb;
int er;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
lock_sock(sk);
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
release_sock(sk);
return er;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (er < 0) {
skb_free_datagram(sk, skb);
release_sock(sk);
return er;
}
if (sax != NULL) {
memset(sax, 0, sizeof(*sax));
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);
}
msg->msg_namelen = sizeof(*sax);
skb_free_datagram(sk, skb);
release_sock(sk);
return copied;
}
| 169,894 |
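The one-character bug class is worth internalizing: memset(sax, 0, sizeof(sax)) zeroes only the size of the pointer, leaving most of the sockaddr uninitialized before it is copied to user space. A demonstration (the struct layout is illustrative):

#include <stdio.h>

struct sockaddr_ax25_like { short family; char call[7]; char pad[16]; };

int main(void)
{
    struct sockaddr_ax25_like s, *sax = &s;

    printf("sizeof(sax)  = %zu\n", sizeof(sax));   /* size of a pointer */
    printf("sizeof(*sax) = %zu\n", sizeof(*sax));  /* size of the struct */
    return 0;
}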
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void ImportCbYCrYQuantum(const Image *image,QuantumInfo *quantum_info,
const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
Quantum *magick_restrict q,ExceptionInfo *exception)
{
QuantumAny
range;
register ssize_t
x;
unsigned int
pixel;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
switch (quantum_info->depth)
{
case 10:
{
Quantum
cbcr[4];
pixel=0;
if (quantum_info->pack == MagickFalse)
{
register ssize_t
i;
size_t
quantum;
ssize_t
n;
n=0;
quantum=0;
for (x=0; x < (ssize_t) number_pixels; x+=2)
{
for (i=0; i < 4; i++)
{
switch (n % 3)
{
case 0:
{
p=PushLongPixel(quantum_info->endian,p,&pixel);
quantum=(size_t) (ScaleShortToQuantum((unsigned short)
(((pixel >> 22) & 0x3ff) << 6)));
break;
}
case 1:
{
quantum=(size_t) (ScaleShortToQuantum((unsigned short)
(((pixel >> 12) & 0x3ff) << 6)));
break;
}
case 2:
{
quantum=(size_t) (ScaleShortToQuantum((unsigned short)
(((pixel >> 2) & 0x3ff) << 6)));
break;
}
}
cbcr[i]=(Quantum) (quantum);
n++;
}
p+=quantum_info->pad;
SetPixelRed(image,cbcr[1],q);
SetPixelGreen(image,cbcr[0],q);
SetPixelBlue(image,cbcr[2],q);
q+=GetPixelChannels(image);
SetPixelRed(image,cbcr[3],q);
SetPixelGreen(image,cbcr[0],q);
SetPixelBlue(image,cbcr[2],q);
q+=GetPixelChannels(image);
}
break;
}
}
default:
{
range=GetQuantumRange(quantum_info->depth);
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushQuantumPixel(quantum_info,p,&pixel);
SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q);
p=PushQuantumPixel(quantum_info,p,&pixel);
SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q);
q+=GetPixelChannels(image);
}
break;
}
}
}
Commit Message: https://github.com/ImageMagick/ImageMagick/issues/126
CWE ID: CWE-125 | static void ImportCbYCrYQuantum(const Image *image,QuantumInfo *quantum_info,
const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
Quantum *magick_restrict q,ExceptionInfo *exception)
{
QuantumAny
range;
register ssize_t
x;
unsigned int
pixel;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
switch (quantum_info->depth)
{
case 10:
{
Quantum
cbcr[4];
pixel=0;
if (quantum_info->pack == MagickFalse)
{
register ssize_t
i;
size_t
quantum;
ssize_t
n;
n=0;
quantum=0;
for (x=0; x < (ssize_t) number_pixels; x+=4)
{
for (i=0; i < 4; i++)
{
switch (n % 3)
{
case 0:
{
p=PushLongPixel(quantum_info->endian,p,&pixel);
quantum=(size_t) (ScaleShortToQuantum((unsigned short)
(((pixel >> 22) & 0x3ff) << 6)));
break;
}
case 1:
{
quantum=(size_t) (ScaleShortToQuantum((unsigned short)
(((pixel >> 12) & 0x3ff) << 6)));
break;
}
case 2:
{
quantum=(size_t) (ScaleShortToQuantum((unsigned short)
(((pixel >> 2) & 0x3ff) << 6)));
break;
}
}
cbcr[i]=(Quantum) (quantum);
n++;
}
p+=quantum_info->pad;
SetPixelRed(image,cbcr[1],q);
SetPixelGreen(image,cbcr[0],q);
SetPixelBlue(image,cbcr[2],q);
q+=GetPixelChannels(image);
SetPixelRed(image,cbcr[3],q);
SetPixelGreen(image,cbcr[0],q);
SetPixelBlue(image,cbcr[2],q);
q+=GetPixelChannels(image);
}
break;
}
}
default:
{
range=GetQuantumRange(quantum_info->depth);
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushQuantumPixel(quantum_info,p,&pixel);
SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q);
p=PushQuantumPixel(quantum_info,p,&pixel);
SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q);
q+=GetPixelChannels(image);
}
break;
}
}
}
| 168,793 |
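The fix is a single stride change, which is the whole bug: each iteration of the 10-bit unpacking loop consumes a fixed group of packed components, so the induction step must advance by exactly the pixels that group represents or the source pointer drifts past the buffer. A distilled sketch of the accounting; the counts are illustrative, not ImageMagick's exact math:

#include <stdio.h>

int main(void)
{
    const int number_pixels = 16;   /* illustrative row length */
    const int group = 4;            /* components read per iteration */
    long consumed;

    /* Stride 2: twice as many iterations as the packing supplies
     * groups for, so the reads run past the source buffer. */
    consumed = 0;
    for (int x = 0; x < number_pixels; x += 2)
        consumed += group;
    printf("stride 2: consumed %ld components\n", consumed);  /* 32 */

    /* Stride 4: iterations match the packed groups available. */
    consumed = 0;
    for (int x = 0; x < number_pixels; x += 4)
        consumed += group;
    printf("stride 4: consumed %ld components\n", consumed);  /* 16 */
    return 0;
}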
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void GLOutputSurfaceBufferQueueAndroid::HandlePartialSwap(
const gfx::Rect& sub_buffer_rect,
uint32_t flags,
gpu::ContextSupport::SwapCompletedCallback swap_callback,
gpu::ContextSupport::PresentationCallback presentation_callback) {
DCHECK(sub_buffer_rect.IsEmpty());
context_provider_->ContextSupport()->CommitOverlayPlanes(
flags, std::move(swap_callback), std::move(presentation_callback));
}
Commit Message: gpu/android: Add support for partial swap with surface control.
Add support for PostSubBuffer to GLSurfaceEGLSurfaceControl. This should
allow the display compositor to draw the minimum sub-rect necessary from
the damage tracking in BufferQueue on the client side, and also to pass
this damage rect to the framework.
[email protected]
Bug: 926020
Change-Id: I73d3320cab68250d4c6865bf21c5531682d8bf61
Reviewed-on: https://chromium-review.googlesource.com/c/1457467
Commit-Queue: Khushal <[email protected]>
Commit-Queue: Antoine Labour <[email protected]>
Reviewed-by: Antoine Labour <[email protected]>
Auto-Submit: Khushal <[email protected]>
Cr-Commit-Position: refs/heads/master@{#629852}
CWE ID: | void GLOutputSurfaceBufferQueueAndroid::HandlePartialSwap(
| 172,104 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void scsi_dma_restart_bh(void *opaque)
{
SCSIDiskState *s = opaque;
SCSIRequest *req;
SCSIDiskReq *r;
qemu_bh_delete(s->bh);
s->bh = NULL;
QTAILQ_FOREACH(req, &s->qdev.requests, next) {
r = DO_UPCAST(SCSIDiskReq, req, req);
if (r->status & SCSI_REQ_STATUS_RETRY) {
int status = r->status;
int ret;
r->status &=
~(SCSI_REQ_STATUS_RETRY | SCSI_REQ_STATUS_RETRY_TYPE_MASK);
switch (status & SCSI_REQ_STATUS_RETRY_TYPE_MASK) {
case SCSI_REQ_STATUS_RETRY_READ:
scsi_read_data(&r->req);
break;
case SCSI_REQ_STATUS_RETRY_WRITE:
scsi_write_data(&r->req);
break;
case SCSI_REQ_STATUS_RETRY_FLUSH:
ret = scsi_disk_emulate_command(r, r->iov.iov_base);
if (ret == 0) {
scsi_req_complete(&r->req, GOOD);
}
}
}
}
}
Commit Message: scsi-disk: lazily allocate bounce buffer
It will not be needed for reads and writes if the HBA provides a sglist.
In addition, this lets scsi-disk refuse commands with an excessive
allocation length, as well as limit memory on usual well-behaved guests.
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
CWE ID: CWE-119 | static void scsi_dma_restart_bh(void *opaque)
{
SCSIDiskState *s = opaque;
SCSIRequest *req;
SCSIDiskReq *r;
qemu_bh_delete(s->bh);
s->bh = NULL;
QTAILQ_FOREACH(req, &s->qdev.requests, next) {
r = DO_UPCAST(SCSIDiskReq, req, req);
if (r->status & SCSI_REQ_STATUS_RETRY) {
int status = r->status;
int ret;
r->status &=
~(SCSI_REQ_STATUS_RETRY | SCSI_REQ_STATUS_RETRY_TYPE_MASK);
switch (status & SCSI_REQ_STATUS_RETRY_TYPE_MASK) {
case SCSI_REQ_STATUS_RETRY_READ:
scsi_read_data(&r->req);
break;
case SCSI_REQ_STATUS_RETRY_WRITE:
scsi_write_data(&r->req);
break;
case SCSI_REQ_STATUS_RETRY_FLUSH:
ret = scsi_disk_emulate_command(r);
if (ret == 0) {
scsi_req_complete(&r->req, GOOD);
}
}
}
}
}
| 166,552 |
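The commit's theme, allocating the bounce buffer only when a request actually needs it and refusing excessive allocation lengths, is a common defensive pattern against guest-controlled sizes. A sketch with assumed names and an assumed cap:

#include <stdlib.h>

struct req {
    void  *bounce;
    size_t bounce_len;
};

#define MAX_BOUNCE (512u * 1024u)   /* illustrative cap, not QEMU's */

/* Lazily allocate, refusing oversized requests so a guest cannot
 * force unbounded host memory use. */
void *get_bounce(struct req *r, size_t len)
{
    if (len > MAX_BOUNCE)
        return NULL;                /* refuse the command */
    if (r->bounce == NULL || r->bounce_len < len) {
        free(r->bounce);
        r->bounce = malloc(len);
        r->bounce_len = r->bounce ? len : 0;
    }
    return r->bounce;
}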
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode)
{
long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
err = compat_get_bitmap(bm, nmask, nr_bits);
nm = compat_alloc_user_space(alloc_size);
err |= copy_to_user(nm, bm, alloc_size);
}
if (err)
return -EFAULT;
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
Commit Message: mm/mempolicy.c: fix error handling in set_mempolicy and mbind.
In the case that compat_get_bitmap fails, we do not want to copy the
bitmap to the user, as it would contain uninitialized stack data and leak
sensitive data.
Signed-off-by: Chris Salls <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-388 | COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
compat_ulong_t, maxnode)
{
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
if (compat_get_bitmap(bm, nmask, nr_bits))
return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
if (copy_to_user(nm, bm, alloc_size))
return -EFAULT;
}
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
| 168,257 |
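The defect was error aggregation: err |= copy_to_user(...) still ran after compat_get_bitmap() had failed, pushing uninitialized stack bytes to user space before the combined error was checked. The fix returns at the first failure; the same discipline in miniature, with stub helpers standing in for the compat calls:

#include <string.h>

static int get_from_user(void *dst, const void *src, size_t n)
{ memcpy(dst, src, n); return 0; }           /* stub: always succeeds */

static int put_to_user(void *dst, const void *src, size_t n)
{ memcpy(dst, src, n); return 0; }           /* stub: always succeeds */

int copy_mask(void *user_out, const void *user_in, size_t n)
{
    char tmp[64];                /* uninitialized, like the on-stack bitmap */
    if (n > sizeof(tmp))
        return -1;
    if (get_from_user(tmp, user_in, n))
        return -1;               /* stop: tmp may still be stack garbage */
    if (put_to_user(user_out, tmp, n))
        return -1;
    return 0;
}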
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int user_update(struct key *key, struct key_preparsed_payload *prep)
{
struct user_key_payload *upayload, *zap;
size_t datalen = prep->datalen;
int ret;
ret = -EINVAL;
if (datalen <= 0 || datalen > 32767 || !prep->data)
goto error;
/* construct a replacement payload */
ret = -ENOMEM;
upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
if (!upayload)
goto error;
upayload->datalen = datalen;
memcpy(upayload->data, prep->data, datalen);
/* check the quota and attach the new data */
zap = upayload;
ret = key_payload_reserve(key, datalen);
if (ret == 0) {
/* attach the new data, displacing the old */
zap = key->payload.data[0];
rcu_assign_keypointer(key, upayload);
key->expiry = 0;
}
if (zap)
kfree_rcu(zap, rcu);
error:
return ret;
}
Commit Message: KEYS: Fix handling of stored error in a negatively instantiated user key
If a user key gets negatively instantiated, an error code is cached in the
payload area. A negatively instantiated key may be then be positively
instantiated by updating it with valid data. However, the ->update key
type method must be aware that the error code may be there.
The following may be used to trigger the bug in the user key type:
keyctl request2 user user "" @u
keyctl add user user "a" @u
which manifests itself as:
BUG: unable to handle kernel paging request at 00000000ffffff8a
IP: [<ffffffff810a376f>] __call_rcu.constprop.76+0x1f/0x280 kernel/rcu/tree.c:3046
PGD 7cc30067 PUD 0
Oops: 0002 [#1] SMP
Modules linked in:
CPU: 3 PID: 2644 Comm: a.out Not tainted 4.3.0+ #49
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
task: ffff88003ddea700 ti: ffff88003dd88000 task.ti: ffff88003dd88000
RIP: 0010:[<ffffffff810a376f>] [<ffffffff810a376f>] __call_rcu.constprop.76+0x1f/0x280
[<ffffffff810a376f>] __call_rcu.constprop.76+0x1f/0x280 kernel/rcu/tree.c:3046
RSP: 0018:ffff88003dd8bdb0 EFLAGS: 00010246
RAX: 00000000ffffff82 RBX: 0000000000000000 RCX: 0000000000000001
RDX: ffffffff81e3fe40 RSI: 0000000000000000 RDI: 00000000ffffff82
RBP: ffff88003dd8bde0 R08: ffff88007d2d2da0 R09: 0000000000000000
R10: 0000000000000000 R11: ffff88003e8073c0 R12: 00000000ffffff82
R13: ffff88003dd8be68 R14: ffff88007d027600 R15: ffff88003ddea700
FS: 0000000000b92880(0063) GS:ffff88007fd00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: 00000000ffffff8a CR3: 000000007cc5f000 CR4: 00000000000006e0
Stack:
ffff88003dd8bdf0 ffffffff81160a8a 0000000000000000 00000000ffffff82
ffff88003dd8be68 ffff88007d027600 ffff88003dd8bdf0 ffffffff810a39e5
ffff88003dd8be20 ffffffff812a31ab ffff88007d027600 ffff88007d027620
Call Trace:
[<ffffffff810a39e5>] kfree_call_rcu+0x15/0x20 kernel/rcu/tree.c:3136
[<ffffffff812a31ab>] user_update+0x8b/0xb0 security/keys/user_defined.c:129
[< inline >] __key_update security/keys/key.c:730
[<ffffffff8129e5c1>] key_create_or_update+0x291/0x440 security/keys/key.c:908
[< inline >] SYSC_add_key security/keys/keyctl.c:125
[<ffffffff8129fc21>] SyS_add_key+0x101/0x1e0 security/keys/keyctl.c:60
[<ffffffff8185f617>] entry_SYSCALL_64_fastpath+0x12/0x6a arch/x86/entry/entry_64.S:185
Note the error code (-ENOKEY) in EDX.
A similar bug can be tripped by:
keyctl request2 trusted user "" @u
keyctl add trusted user "a" @u
This should also affect encrypted keys - but that has to be correctly
parameterised or it will fail with EINVAL before getting to the bit that
crashes.
Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: David Howells <[email protected]>
Acked-by: Mimi Zohar <[email protected]>
Signed-off-by: James Morris <[email protected]>
CWE ID: CWE-264 | int user_update(struct key *key, struct key_preparsed_payload *prep)
{
struct user_key_payload *upayload, *zap;
size_t datalen = prep->datalen;
int ret;
ret = -EINVAL;
if (datalen <= 0 || datalen > 32767 || !prep->data)
goto error;
/* construct a replacement payload */
ret = -ENOMEM;
upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
if (!upayload)
goto error;
upayload->datalen = datalen;
memcpy(upayload->data, prep->data, datalen);
/* check the quota and attach the new data */
zap = upayload;
ret = key_payload_reserve(key, datalen);
if (ret == 0) {
/* attach the new data, displacing the old */
if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
zap = key->payload.data[0];
else
zap = NULL;
rcu_assign_keypointer(key, upayload);
key->expiry = 0;
}
if (zap)
kfree_rcu(zap, rcu);
error:
return ret;
}
| 167,535 |
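The subtlety is that a negatively instantiated key stores an error code where the payload pointer normally lives, so blindly freeing the old payload frees a non-pointer. The fix consults KEY_FLAG_NEGATIVE first. A simplified sketch of a pointer field that doubles as an error carrier:

#include <stdlib.h>

struct key_like {
    void *payload;       /* real allocation, or an encoded error code */
    int   negative;      /* mirrors KEY_FLAG_NEGATIVE */
};

void update_payload(struct key_like *k, void *fresh)
{
    void *old = NULL;
    if (!k->negative)        /* only a positive key owns real memory */
        old = k->payload;
    k->payload  = fresh;
    k->negative = 0;
    free(old);               /* free(NULL) is a harmless no-op */
}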
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool GestureSequence::PinchUpdate(const TouchEvent& event,
const GesturePoint& point, Gestures* gestures) {
DCHECK(state_ == GS_PINCH);
float distance = points_[0].Distance(points_[1]);
if (abs(distance - pinch_distance_current_) < kMinimumPinchUpdateDistance) {
if (!points_[0].DidScroll(event, kMinimumDistanceForPinchScroll) ||
!points_[1].DidScroll(event, kMinimumDistanceForPinchScroll))
return false;
gfx::Point center = points_[0].last_touch_position().Middle(
points_[1].last_touch_position());
AppendScrollGestureUpdate(point, center, gestures);
} else {
AppendPinchGestureUpdate(points_[0], points_[1],
distance / pinch_distance_current_, gestures);
pinch_distance_current_ = distance;
}
return true;
}
Commit Message: Add setters for the aura gesture recognizer constants.
BUG=113227
TEST=none
Review URL: http://codereview.chromium.org/9372040
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@122586 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-20 | bool GestureSequence::PinchUpdate(const TouchEvent& event,
const GesturePoint& point, Gestures* gestures) {
DCHECK(state_ == GS_PINCH);
float distance = points_[0].Distance(points_[1]);
if (abs(distance - pinch_distance_current_) <
GestureConfiguration::minimum_pinch_update_distance_in_pixels()) {
if (!points_[0].DidScroll(event,
GestureConfiguration::minimum_distance_for_pinch_scroll_in_pixels()) ||
!points_[1].DidScroll(event,
GestureConfiguration::minimum_distance_for_pinch_scroll_in_pixels()))
return false;
gfx::Point center = points_[0].last_touch_position().Middle(
points_[1].last_touch_position());
AppendScrollGestureUpdate(point, center, gestures);
} else {
AppendPinchGestureUpdate(points_[0], points_[1],
distance / pinch_distance_current_, gestures);
pinch_distance_current_ = distance;
}
return true;
}
| 171,047 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void CastCastView::ButtonPressed(views::Button* sender,
const ui::Event& event) {
DCHECK(sender == stop_button_);
StopCast();
}
Commit Message: Allow the cast tray to function as expected when the installed extension is missing API methods.
BUG=489445
Review URL: https://codereview.chromium.org/1145833003
Cr-Commit-Position: refs/heads/master@{#330663}
CWE ID: CWE-79 | void CastCastView::ButtonPressed(views::Button* sender,
const ui::Event& event) {
DCHECK(sender == stop_button_);
cast_config_delegate_->StopCasting();
}
| 171,623 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ConversionContext::Convert(const PaintChunkSubset& paint_chunks,
const DisplayItemList& display_items) {
for (const auto& chunk : paint_chunks) {
const auto& chunk_state = chunk.properties;
bool switched_to_chunk_state = false;
for (const auto& item : display_items.ItemsInPaintChunk(chunk)) {
DCHECK(item.IsDrawing());
auto record =
static_cast<const DrawingDisplayItem&>(item).GetPaintRecord();
if ((!record || record->size() == 0) &&
chunk_state.Effect() == EffectPaintPropertyNode::Root()) {
continue;
}
TranslateForLayerOffsetOnce();
if (!switched_to_chunk_state) {
SwitchToChunkState(chunk);
switched_to_chunk_state = true;
}
cc_list_.StartPaint();
if (record && record->size() != 0)
cc_list_.push<cc::DrawRecordOp>(std::move(record));
cc_list_.EndPaintOfUnpaired(
chunk_to_layer_mapper_.MapVisualRect(item.VisualRect()));
}
UpdateEffectBounds(chunk.bounds, chunk_state.Transform());
}
}
Commit Message: Reland "[CI] Make paint property nodes non-ref-counted"
This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7.
Reason for revert: Retry in M69.
Original change's description:
> Revert "[CI] Make paint property nodes non-ref-counted"
>
> This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123.
>
> Reason for revert: Caused bugs found by clusterfuzz
>
> Original change's description:
> > [CI] Make paint property nodes non-ref-counted
> >
> > Now all paint property nodes are owned by ObjectPaintProperties
> > (and LocalFrameView temporarily before removing non-RLS mode).
> > Others just use raw pointers or references.
> >
> > Bug: 833496
> > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae
> > Reviewed-on: https://chromium-review.googlesource.com/1031101
> > Reviewed-by: Tien-Ren Chen <[email protected]>
> > Commit-Queue: Xianzhu Wang <[email protected]>
> > Cr-Commit-Position: refs/heads/master@{#554626}
>
> [email protected],[email protected],[email protected]
>
> Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: 833496,837932,837943
> Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> Reviewed-on: https://chromium-review.googlesource.com/1034292
> Reviewed-by: Xianzhu Wang <[email protected]>
> Commit-Queue: Xianzhu Wang <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#554653}
[email protected],[email protected],[email protected]
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: 833496, 837932, 837943
Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
Reviewed-on: https://chromium-review.googlesource.com/1083491
Commit-Queue: Xianzhu Wang <[email protected]>
Reviewed-by: Xianzhu Wang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#563930}
CWE ID: | void ConversionContext::Convert(const PaintChunkSubset& paint_chunks,
const DisplayItemList& display_items) {
for (const auto& chunk : paint_chunks) {
const auto& chunk_state = chunk.properties;
bool switched_to_chunk_state = false;
for (const auto& item : display_items.ItemsInPaintChunk(chunk)) {
DCHECK(item.IsDrawing());
auto record =
static_cast<const DrawingDisplayItem&>(item).GetPaintRecord();
if ((!record || record->size() == 0) &&
chunk_state.Effect() == &EffectPaintPropertyNode::Root()) {
continue;
}
TranslateForLayerOffsetOnce();
if (!switched_to_chunk_state) {
SwitchToChunkState(chunk);
switched_to_chunk_state = true;
}
cc_list_.StartPaint();
if (record && record->size() != 0)
cc_list_.push<cc::DrawRecordOp>(std::move(record));
cc_list_.EndPaintOfUnpaired(
chunk_to_layer_mapper_.MapVisualRect(item.VisualRect()));
}
UpdateEffectBounds(chunk.bounds, chunk_state.Transform());
}
}
| 171,824 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void big_key_describe(const struct key *key, struct seq_file *m)
{
size_t datalen = (size_t)key->payload.data[big_key_len];
seq_puts(m, key->description);
if (key_is_instantiated(key))
seq_printf(m, ": %zu [%s]",
datalen,
datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
}
Commit Message: KEYS: Fix race between updating and finding a negative key
Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection
error into one field such that:
(1) The instantiation state can be modified/read atomically.
(2) The error can be accessed atomically with the state.
(3) The error isn't stored unioned with the payload pointers.
This deals with the problem that the state is spread over three different
objects (two bits and a separate variable) and reading or updating them
atomically isn't practical, given that not only can uninstantiated keys
change into instantiated or rejected keys, but rejected keys can also turn
into instantiated keys - and someone accessing the key might not be using
any locking.
The main side effect of this problem is that what was held in the payload
may change, depending on the state. For instance, you might observe the
key to be in the rejected state. You then read the cached error, but if
the key semaphore wasn't locked, the key might've become instantiated
between the two reads - and you might now have something in hand that isn't
actually an error code.
The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error
code if the key is negatively instantiated. The key_is_instantiated()
function is replaced with key_is_positive() to avoid confusion as negative
keys are also 'instantiated'.
Additionally, barriering is included:
(1) Order payload-set before state-set during instantiation.
(2) Order state-read before payload-read when using the key.
Further separate barriering is necessary if RCU is being used to access the
payload content after reading the payload pointers.
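The two ordering rules map directly onto a release/acquire pairing. Below is a minimal sketch using C11 atomics - an illustrative user-space model with a simplified two-field key; the kernel uses its own barrier primitives, not <stdatomic.h>:
#include <stdatomic.h>
#include <stddef.h>
/* Illustrative model only: a payload plus a combined state word that is
 * 0 (uninstantiated), 1 (positive), or a negative error code. */
struct model_key {
	void      *payload;
	atomic_int state;
};
static void instantiate(struct model_key *k, void *payload)
{
	k->payload = payload;
	/* (1) payload-set is ordered before state-set. */
	atomic_store_explicit(&k->state, 1, memory_order_release);
}
static void *use_key(struct model_key *k)
{
	/* (2) state-read is ordered before payload-read. */
	int state = atomic_load_explicit(&k->state, memory_order_acquire);
	if (state <= 0)
		return NULL;        /* uninstantiated, or negative: state holds -error */
	return k->payload;      /* safe: release/acquire makes the payload visible */
}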
Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data")
Cc: [email protected] # v4.4+
Reported-by: Eric Biggers <[email protected]>
Signed-off-by: David Howells <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
CWE ID: CWE-20 | void big_key_describe(const struct key *key, struct seq_file *m)
{
size_t datalen = (size_t)key->payload.data[big_key_len];
seq_puts(m, key->description);
if (key_is_positive(key))
seq_printf(m, ": %zu [%s]",
datalen,
datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
}
| 167,692 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void SRP_user_pwd_free(SRP_user_pwd *user_pwd)
{
if (user_pwd == NULL)
return;
BN_free(user_pwd->s);
BN_clear_free(user_pwd->v);
OPENSSL_free(user_pwd->id);
OPENSSL_free(user_pwd->info);
OPENSSL_free(user_pwd);
}
Commit Message:
CWE ID: CWE-399 | void SRP_user_pwd_free(SRP_user_pwd *user_pwd)
{
if (user_pwd == NULL)
return;
BN_free(user_pwd->s);
BN_clear_free(user_pwd->v);
OPENSSL_free(user_pwd->id);
OPENSSL_free(user_pwd->info);
OPENSSL_free(user_pwd);
}
| 165,249 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
struct bpf_insn *insn = prog->insnsi;
const struct bpf_func_proto *fn;
const int insn_cnt = prog->len;
const struct bpf_map_ops *ops;
struct bpf_insn_aux_data *aux;
struct bpf_insn insn_buf[16];
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
int i, cnt, delta = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
struct bpf_insn mask_and_div[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
/* Rx div 0 -> 0 */
BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
*insn,
};
struct bpf_insn mask_and_mod[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
/* Rx mod 0 -> Rx */
BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
*insn,
};
struct bpf_insn *patchlet;
if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
patchlet = mask_and_div + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
} else {
patchlet = mask_and_mod + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
}
new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (BPF_CLASS(insn->code) == BPF_LD &&
(BPF_MODE(insn->code) == BPF_ABS ||
BPF_MODE(insn->code) == BPF_IND)) {
cnt = env->ops->gen_ld_abs(insn, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
if (insn->src_reg == BPF_PSEUDO_CALL)
continue;
if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
if (insn->imm == BPF_FUNC_get_prandom_u32)
bpf_user_rnd_init_once();
if (insn->imm == BPF_FUNC_override_return)
prog->kprobe_override = 1;
if (insn->imm == BPF_FUNC_tail_call) {
/* If we tail call into other programs, we
* cannot make any assumptions since they can
* be replaced dynamically during runtime in
* the program array.
*/
prog->cb_access = 1;
env->prog->aux->stack_depth = MAX_BPF_STACK;
env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
/* mark bpf_tail_call as different opcode to avoid
 * conditional branch in the interpreter for every normal
* call and to prevent accidental JITing by JIT compiler
* that doesn't support bpf_tail_call yet
*/
insn->imm = 0;
insn->code = BPF_JMP | BPF_TAIL_CALL;
aux = &env->insn_aux_data[i + delta];
if (!bpf_map_ptr_unpriv(aux))
continue;
/* instead of changing every JIT dealing with tail_call
* emit two extra insns:
* if (index >= max_entries) goto out;
* index &= array->index_mask;
* to avoid out-of-bounds cpu speculation
*/
if (bpf_map_ptr_poisoned(aux)) {
verbose(env, "tail_call abusing map_ptr\n");
return -EINVAL;
}
map_ptr = BPF_MAP_PTR(aux->map_state);
insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
map_ptr->max_entries, 2);
insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
container_of(map_ptr,
struct bpf_array,
map)->index_mask);
insn_buf[2] = *insn;
cnt = 3;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
* and other inlining handlers are currently limited to 64 bit
* only.
*/
if (prog->jit_requested && BITS_PER_LONG == 64 &&
(insn->imm == BPF_FUNC_map_lookup_elem ||
insn->imm == BPF_FUNC_map_update_elem ||
insn->imm == BPF_FUNC_map_delete_elem ||
insn->imm == BPF_FUNC_map_push_elem ||
insn->imm == BPF_FUNC_map_pop_elem ||
insn->imm == BPF_FUNC_map_peek_elem)) {
aux = &env->insn_aux_data[i + delta];
if (bpf_map_ptr_poisoned(aux))
goto patch_call_imm;
map_ptr = BPF_MAP_PTR(aux->map_state);
ops = map_ptr->ops;
if (insn->imm == BPF_FUNC_map_lookup_elem &&
ops->map_gen_lookup) {
cnt = ops->map_gen_lookup(map_ptr, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta,
insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
(void *(*)(struct bpf_map *map, void *key))NULL));
BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
(int (*)(struct bpf_map *map, void *key))NULL));
BUILD_BUG_ON(!__same_type(ops->map_update_elem,
(int (*)(struct bpf_map *map, void *key, void *value,
u64 flags))NULL));
BUILD_BUG_ON(!__same_type(ops->map_push_elem,
(int (*)(struct bpf_map *map, void *value,
u64 flags))NULL));
BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
(int (*)(struct bpf_map *map, void *value))NULL));
BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
(int (*)(struct bpf_map *map, void *value))NULL));
switch (insn->imm) {
case BPF_FUNC_map_lookup_elem:
insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_update_elem:
insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_delete_elem:
insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_push_elem:
insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_pop_elem:
insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_peek_elem:
insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
__bpf_call_base;
continue;
}
goto patch_call_imm;
}
patch_call_imm:
fn = env->ops->get_func_proto(insn->imm, env->prog);
/* all functions that have prototype and verifier allowed
* programs to call them, must be real in-kernel functions
*/
if (!fn->func) {
verbose(env,
"kernel subsystem misconfigured func %s#%d\n",
func_id_name(insn->imm), insn->imm);
return -EFAULT;
}
insn->imm = fn->func - __bpf_call_base;
}
return 0;
}
Commit Message: bpf: prevent out of bounds speculation on pointer arithmetic
Jann reported that the original commit back in b2157399cc98
("bpf: prevent out-of-bounds speculation") was not sufficient
to stop the CPU from speculating out-of-bounds memory accesses:
While b2157399cc98 only focussed on masking array map access
for unprivileged users for tail calls and data access such
that the user provided index gets sanitized from BPF program
and syscall side, there is still a more generic form, reachable
from BPF programs, that applies to most maps holding user data:
dynamic map access where the offset is an unknown scalar or a
"slow" known scalar, for
example:
- Load a map value pointer into R6
- Load an index into R7
- Do a slow computation (e.g. with a memory dependency) that
loads a limit into R8 (e.g. load the limit from a map for
high latency, then mask it to make the verifier happy)
- Exit if R7 >= R8 (mispredicted branch)
- Load R0 = R6[R7]
- Load R0 = R6[R0]
For unknown scalars there are two options in the BPF verifier
where we could derive knowledge from in order to guarantee
safe access to the memory: i) While </>/<=/>= variants won't
allow to derive any lower or upper bounds from the unknown
scalar where it would be safe to add it to the map value
pointer, it is possible through ==/!= test however. ii) another
option is to transform the unknown scalar into a known scalar,
for example, through ALU ops combination such as R &= <imm>
followed by R |= <imm> or any similar combination where the
original information from the unknown scalar would be destroyed
entirely leaving R with a constant. The initial slow load still
precedes the latter ALU ops on that register, so the CPU
executes speculatively from that point. Once we have the known
scalar, any compare operation would work then. A third option
only involving registers with known scalars could be crafted
as described in [0] where a CPU port (e.g. Slow Int unit)
would be filled with many dependent computations such that
the subsequent condition depending on its outcome has to wait
for evaluation on its execution port and thereby executing
speculatively if the speculated code can be scheduled on a
different execution port, or any other form of mistraining
as described in [1], for example. Given this is not limited
to only unknown scalars, not only map but also stack access
is affected since both is accessible for unprivileged users
and could potentially be used for out of bounds access under
speculation.
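The gadget described above can also be modeled in plain C. Whether it actually leaks depends on microarchitectural state and a timing side channel around it, so treat this strictly as an illustration of the shape of the problem (all names are invented):
#include <stdint.h>
/* Shape of the speculation gadget (illustration only; a real PoC
 * needs cache-timing measurement around it). */
uint8_t gadget(const uint8_t *map_value, uint64_t idx,
	       const volatile uint64_t *slow_limit)
{
	/* The limit load is slow (memory dependency), so the branch below
	 * resolves late and can be mispredicted. */
	if (idx < *slow_limit)                 /* mispredicted: falls through */
		return map_value[map_value[idx]];  /* two dependent OOB loads */
	return 0;
}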
In order to prevent any of these cases, the verifier is now
sanitizing pointer arithmetic on the offset such that any
out of bounds speculation would be masked in a way where the
pointer arithmetic result in the destination register will
stay unchanged, meaning offset masked into zero similar as
in array_index_nospec() case. With regards to implementation,
there are three options that were considered: i) new insn
for sanitation, ii) push/pop insn and sanitation as inlined
BPF, iii) reuse of ax register and sanitation as inlined BPF.
Option i) has the downside that we end up using from reserved
bits in the opcode space, but also that we would require
each JIT to emit masking as native arch opcodes meaning
mitigation would have slow adoption till everyone implements
it eventually which is counter-productive. Option ii) and iii)
have both in common that a temporary register is needed in
order to implement the sanitation as inlined BPF since we
are not allowed to modify the source register. While a push /
pop insn in ii) would be useful to have in any case, it
requires once again that every JIT needs to implement it
first. While possible, amount of changes needed would also
be unsuitable for a -stable patch. Therefore, the path which
has fewer changes, less BPF instructions for the mitigation
and does not require anything to be changed in the JITs is
option iii) which this work is pursuing. The ax register is
already mapped to a register in all JITs (modulo arm32 where
it's mapped to stack as various other BPF registers there)
and used in constant blinding for JITs-only so far. It can
be reused for verifier rewrites under certain constraints.
The interpreter's tmp "register" has therefore been remapped
into extending the register set with hidden ax register and
reusing that for a number of instructions that needed the
prior temporary variable internally (e.g. div, mod). This
allows for zero increase in stack space usage in the interpreter,
and enables (restricted) generic use in rewrites otherwise as
long as such a patchlet does not make use of these instructions.
The sanitation mask is dynamic and relative to the offset the
map value or stack pointer currently holds.
There are various cases that need to be taken under consideration
for the masking, e.g. such operation could look as follows:
ptr += val or val += ptr or ptr -= val. Thus, the value to be
sanitized could reside either in source or in destination
register, and the limit is different depending on whether
the ALU op is addition or subtraction and depending on the
current known and bounded offset. The limit is derived as
follows: limit := max_value_size - (smin_value + off). For
subtraction: limit := umax_value + off. This holds because
we do not allow any pointer arithmetic that would
temporarily go out of bounds or would have an unknown
value with mixed signed bounds where it is unclear at
verification time whether the actual runtime value would
be either negative or positive. For example, we have a
derived map pointer value with constant offset and bounded
one, so limit based on smin_value works because the verifier
requires that statically analyzed arithmetic on the pointer
must be in bounds, and thus it checks if resulting
smin_value + off and umax_value + off is still within map
value bounds at time of arithmetic in addition to time of
access. Similarly, for the case of stack access we derive
the limit as follows: MAX_BPF_STACK + off for subtraction
and -off for the case of addition where off := ptr_reg->off +
ptr_reg->var_off.value. Subtraction is a special case for
the masking which can be in form of ptr += -val, ptr -= -val,
or ptr -= val. In the first two cases where we know that
the value is negative, we need to temporarily negate the
value in order to do the sanitation on a positive value
where we later swap the ALU op, and restore original source
register if the value was in source.
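Stripped of its BPF encoding, the emitted patchlet computes a branch-free mask from the limit. The following user-space re-implementation is a sketch for illustration only; it mirrors the r11 sequence shown in the dumps further below, using the same 20479 limit in the test values:
#include <stdint.h>
#include <stdio.h>
/* Mirrors the r11 sequence: the mask is all-ones when 0 <= val <= limit
 * and all-zeros otherwise, so an out-of-range offset becomes 0 without
 * any conditional branch the CPU could speculate past. */
static uint64_t sanitize(uint64_t val, uint64_t limit)
{
	uint64_t ax = limit;                     /* r11 = limit  */
	ax -= val;                               /* r11 -= val   */
	ax |= val;                               /* r11 |= val   */
	ax = -ax;                                /* r11 = -r11   */
	ax = (uint64_t)((int64_t)ax >> 63);      /* r11 s>>= 63  */
	return val & ax;                         /* val &= r11   */
}
int main(void)
{
	printf("%llu\n", (unsigned long long)sanitize(10,    20479)); /* 10 */
	printf("%llu\n", (unsigned long long)sanitize(30000, 20479)); /* 0  */
	return 0;
}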
The sanitation of pointer arithmetic alone is still not fully
sufficient as is, since a scenario like the following could
happen ...
PTR += 0x1000 (e.g. K-based imm)
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
PTR += 0x1000
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
[...]
... which under speculation could end up as ...
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
[...]
... and therefore still access out of bounds. To prevent such
case, the verifier is also analyzing safety for potential out
of bounds access under speculative execution. Meaning, it is
also simulating pointer access under truncation. We therefore
"branch off" and push the current verification state after the
ALU operation with known 0 to the verification stack for later
analysis. Given the current path analysis succeeded it is
likely that the one under speculation can be pruned. In any
case, it is also subject to existing complexity limits and
therefore anything beyond this point will be rejected. In
terms of pruning, it needs to be ensured that the verification
state from speculative execution simulation must never prune
a non-speculative execution path, therefore, we mark verifier
state accordingly at the time of push_stack(). If verifier
detects out of bounds access under speculative execution from
one of the possible paths that includes a truncation, it will
reject such program.
Given we mask every reg-based pointer arithmetic for
unprivileged programs, we've been looking into how it could
affect real-world programs in terms of size increase. As the
majority of programs are targeted for privileged-only use
case, we've unconditionally enabled masking (with its alu
restrictions on top of it) for privileged programs for the
sake of testing in order to check i) whether they get rejected
in its current form, and ii) by how much the number of
instructions and size will increase. We've tested this by
using Katran, Cilium and test_l4lb from the kernel selftests.
For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o
and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb
we've used test_l4lb.o as well as test_l4lb_noinline.o. We
found that none of the programs got rejected by the verifier
with this change, and that impact is rather minimal to none.
balancer_kern.o had 13,904 bytes (1,738 insns) xlated and
7,797 bytes JITed before and after the change. Most complex
program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated
and 18,538 bytes JITed before and after and none of the other
tail call programs in bpf_lxc.o had any changes either. For
the older bpf_lxc_opt_-DUNKNOWN.o object we found a small
increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed
before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed
after the change. Other programs from that object file had
similar small increase. Both test_l4lb.o had no change and
remained at 6,544 bytes (817 insns) xlated and 3,401 bytes
JITed and for test_l4lb_noinline.o constant at 5,080 bytes
(634 insns) xlated and 3,313 bytes JITed. This can be explained
in that LLVM typically optimizes stack based pointer arithmetic
by using K-based operations and that use of dynamic map access
is not overly frequent. However, in future we may decide to
optimize the algorithm further under known guarantees from
branch and value speculation. Latter seems also unclear in
terms of prediction heuristics that today's CPUs apply as well
as whether there could be collisions in e.g. the predictor's
Value History/Pattern Table for triggering out of bounds access,
thus masking is performed unconditionally at this point but could
be subject to relaxation later on. We were generally also
brainstorming various other approaches for mitigation, but the
blocker was always lack of available registers at runtime and/or
overhead for runtime tracking of limits belonging to a specific
pointer. Thus, we found this to be minimally intrusive under
given constraints.
With that in place, a simple example with sanitized access on
unprivileged load at post-verification time looks as follows:
# bpftool prog dump xlated id 282
[...]
28: (79) r1 = *(u64 *)(r7 +0)
29: (79) r2 = *(u64 *)(r7 +8)
30: (57) r1 &= 15
31: (79) r3 = *(u64 *)(r0 +4608)
32: (57) r3 &= 1
33: (47) r3 |= 1
34: (2d) if r2 > r3 goto pc+19
35: (b4) (u32) r11 = (u32) 20479 |
36: (1f) r11 -= r2 | Dynamic sanitation for pointer
37: (4f) r11 |= r2 | arithmetic with registers
38: (87) r11 = -r11 | containing bounded or known
39: (c7) r11 s>>= 63 | scalars in order to prevent
40: (5f) r11 &= r2 | out of bounds speculation.
41: (0f) r4 += r11 |
42: (71) r4 = *(u8 *)(r4 +0)
43: (6f) r4 <<= r1
[...]
For the case where the scalar sits in the destination register
as opposed to the source register, the following code is emitted
for the above example:
[...]
16: (b4) (u32) r11 = (u32) 20479
17: (1f) r11 -= r2
18: (4f) r11 |= r2
19: (87) r11 = -r11
20: (c7) r11 s>>= 63
21: (5f) r2 &= r11
22: (0f) r2 += r0
23: (61) r0 = *(u32 *)(r2 +0)
[...]
JIT blinding example with non-conflicting use of r10:
[...]
d5: je 0x0000000000000106 _
d7: mov 0x0(%rax),%edi |
da: mov $0xf153246,%r10d | Index load from map value and
e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f.
e7: and %r10,%rdi |_
ea: mov $0x2f,%r10d |
f0: sub %rdi,%r10 | Sanitized addition. Both use r10
f3: or %rdi,%r10 | but do not interfere with each
f6: neg %r10 | other. (Neither do these instructions
f9: sar $0x3f,%r10 | interfere with the use of ax as temp
fd: and %r10,%rdi | in interpreter.)
100: add %rax,%rdi |_
103: mov 0x0(%rdi),%eax
[...]
Tested that it fixes Jann's reproducer, and also checked that test_verifier
and test_progs suite with interpreter, JIT and JIT with hardening enabled
on x86-64 and arm64 runs successfully.
[0] Speculose: Analyzing the Security Implications of Speculative
Execution in CPUs, Giorgi Maisuradze and Christian Rossow,
https://arxiv.org/pdf/1801.04084.pdf
[1] A Systematic Evaluation of Transient Execution Attacks and
Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz,
Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens,
Dmitry Evtyushkin, Daniel Gruss,
https://arxiv.org/pdf/1811.05441.pdf
Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
CWE ID: CWE-189 | static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
struct bpf_insn *insn = prog->insnsi;
const struct bpf_func_proto *fn;
const int insn_cnt = prog->len;
const struct bpf_map_ops *ops;
struct bpf_insn_aux_data *aux;
struct bpf_insn insn_buf[16];
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
int i, cnt, delta = 0;
for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
struct bpf_insn mask_and_div[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
/* Rx div 0 -> 0 */
BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
*insn,
};
struct bpf_insn mask_and_mod[] = {
BPF_MOV32_REG(insn->src_reg, insn->src_reg),
/* Rx mod 0 -> Rx */
BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
*insn,
};
struct bpf_insn *patchlet;
if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
patchlet = mask_and_div + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
} else {
patchlet = mask_and_mod + (is64 ? 1 : 0);
cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
}
new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (BPF_CLASS(insn->code) == BPF_LD &&
(BPF_MODE(insn->code) == BPF_ABS ||
BPF_MODE(insn->code) == BPF_IND)) {
cnt = env->ops->gen_ld_abs(insn, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
struct bpf_insn insn_buf[16];
struct bpf_insn *patch = &insn_buf[0];
bool issrc, isneg;
u32 off_reg;
aux = &env->insn_aux_data[i + delta];
if (!aux->alu_state)
continue;
isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
BPF_ALU_SANITIZE_SRC;
off_reg = issrc ? insn->src_reg : insn->dst_reg;
if (isneg)
*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
if (issrc) {
*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
off_reg);
insn->src_reg = BPF_REG_AX;
} else {
*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
BPF_REG_AX);
}
if (isneg)
insn->code = insn->code == code_add ?
code_sub : code_add;
*patch++ = *insn;
if (issrc && isneg)
*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
cnt = patch - insn_buf;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
if (insn->src_reg == BPF_PSEUDO_CALL)
continue;
if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
if (insn->imm == BPF_FUNC_get_prandom_u32)
bpf_user_rnd_init_once();
if (insn->imm == BPF_FUNC_override_return)
prog->kprobe_override = 1;
if (insn->imm == BPF_FUNC_tail_call) {
/* If we tail call into other programs, we
* cannot make any assumptions since they can
* be replaced dynamically during runtime in
* the program array.
*/
prog->cb_access = 1;
env->prog->aux->stack_depth = MAX_BPF_STACK;
env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
/* mark bpf_tail_call as different opcode to avoid
 * conditional branch in the interpreter for every normal
* call and to prevent accidental JITing by JIT compiler
* that doesn't support bpf_tail_call yet
*/
insn->imm = 0;
insn->code = BPF_JMP | BPF_TAIL_CALL;
aux = &env->insn_aux_data[i + delta];
if (!bpf_map_ptr_unpriv(aux))
continue;
/* instead of changing every JIT dealing with tail_call
* emit two extra insns:
* if (index >= max_entries) goto out;
* index &= array->index_mask;
* to avoid out-of-bounds cpu speculation
*/
if (bpf_map_ptr_poisoned(aux)) {
verbose(env, "tail_call abusing map_ptr\n");
return -EINVAL;
}
map_ptr = BPF_MAP_PTR(aux->map_state);
insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
map_ptr->max_entries, 2);
insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
container_of(map_ptr,
struct bpf_array,
map)->index_mask);
insn_buf[2] = *insn;
cnt = 3;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
* and other inlining handlers are currently limited to 64 bit
* only.
*/
if (prog->jit_requested && BITS_PER_LONG == 64 &&
(insn->imm == BPF_FUNC_map_lookup_elem ||
insn->imm == BPF_FUNC_map_update_elem ||
insn->imm == BPF_FUNC_map_delete_elem ||
insn->imm == BPF_FUNC_map_push_elem ||
insn->imm == BPF_FUNC_map_pop_elem ||
insn->imm == BPF_FUNC_map_peek_elem)) {
aux = &env->insn_aux_data[i + delta];
if (bpf_map_ptr_poisoned(aux))
goto patch_call_imm;
map_ptr = BPF_MAP_PTR(aux->map_state);
ops = map_ptr->ops;
if (insn->imm == BPF_FUNC_map_lookup_elem &&
ops->map_gen_lookup) {
cnt = ops->map_gen_lookup(map_ptr, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
new_prog = bpf_patch_insn_data(env, i + delta,
insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
(void *(*)(struct bpf_map *map, void *key))NULL));
BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
(int (*)(struct bpf_map *map, void *key))NULL));
BUILD_BUG_ON(!__same_type(ops->map_update_elem,
(int (*)(struct bpf_map *map, void *key, void *value,
u64 flags))NULL));
BUILD_BUG_ON(!__same_type(ops->map_push_elem,
(int (*)(struct bpf_map *map, void *value,
u64 flags))NULL));
BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
(int (*)(struct bpf_map *map, void *value))NULL));
BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
(int (*)(struct bpf_map *map, void *value))NULL));
switch (insn->imm) {
case BPF_FUNC_map_lookup_elem:
insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_update_elem:
insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_delete_elem:
insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_push_elem:
insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_pop_elem:
insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
__bpf_call_base;
continue;
case BPF_FUNC_map_peek_elem:
insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
__bpf_call_base;
continue;
}
goto patch_call_imm;
}
patch_call_imm:
fn = env->ops->get_func_proto(insn->imm, env->prog);
/* all functions that have prototype and verifier allowed
* programs to call them, must be real in-kernel functions
*/
if (!fn->func) {
verbose(env,
"kernel subsystem misconfigured func %s#%d\n",
func_id_name(insn->imm), insn->imm);
return -EFAULT;
}
insn->imm = fn->func - __bpf_call_base;
}
return 0;
}
| 170,243 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: magiccheck(struct magic_set *ms, struct magic *m)
{
uint64_t l = m->value.q;
uint64_t v;
float fl, fv;
double dl, dv;
int matched;
union VALUETYPE *p = &ms->ms_value;
switch (m->type) {
case FILE_BYTE:
v = p->b;
break;
case FILE_SHORT:
case FILE_BESHORT:
case FILE_LESHORT:
v = p->h;
break;
case FILE_LONG:
case FILE_BELONG:
case FILE_LELONG:
case FILE_MELONG:
case FILE_DATE:
case FILE_BEDATE:
case FILE_LEDATE:
case FILE_MEDATE:
case FILE_LDATE:
case FILE_BELDATE:
case FILE_LELDATE:
case FILE_MELDATE:
v = p->l;
break;
case FILE_QUAD:
case FILE_LEQUAD:
case FILE_BEQUAD:
case FILE_QDATE:
case FILE_BEQDATE:
case FILE_LEQDATE:
case FILE_QLDATE:
case FILE_BEQLDATE:
case FILE_LEQLDATE:
case FILE_QWDATE:
case FILE_BEQWDATE:
case FILE_LEQWDATE:
v = p->q;
break;
case FILE_FLOAT:
case FILE_BEFLOAT:
case FILE_LEFLOAT:
fl = m->value.f;
fv = p->f;
switch (m->reln) {
case 'x':
matched = 1;
break;
case '!':
matched = fv != fl;
break;
case '=':
matched = fv == fl;
break;
case '>':
matched = fv > fl;
break;
case '<':
matched = fv < fl;
break;
default:
file_magerror(ms, "cannot happen with float: invalid relation `%c'",
m->reln);
return -1;
}
return matched;
case FILE_DOUBLE:
case FILE_BEDOUBLE:
case FILE_LEDOUBLE:
dl = m->value.d;
dv = p->d;
switch (m->reln) {
case 'x':
matched = 1;
break;
case '!':
matched = dv != dl;
break;
case '=':
matched = dv == dl;
break;
case '>':
matched = dv > dl;
break;
case '<':
matched = dv < dl;
break;
default:
file_magerror(ms, "cannot happen with double: invalid relation `%c'", m->reln);
return -1;
}
return matched;
case FILE_DEFAULT:
case FILE_CLEAR:
l = 0;
v = 0;
break;
case FILE_STRING:
case FILE_PSTRING:
l = 0;
v = file_strncmp(m->value.s, p->s, (size_t)m->vallen, m->str_flags);
break;
case FILE_BESTRING16:
case FILE_LESTRING16:
l = 0;
v = file_strncmp16(m->value.s, p->s, (size_t)m->vallen, m->str_flags);
break;
case FILE_SEARCH: { /* search ms->search.s for the string m->value.s */
size_t slen;
size_t idx;
if (ms->search.s == NULL)
return 0;
slen = MIN(m->vallen, sizeof(m->value.s));
l = 0;
v = 0;
for (idx = 0; m->str_range == 0 || idx < m->str_range; idx++) {
if (slen + idx > ms->search.s_len)
break;
v = file_strncmp(m->value.s, ms->search.s + idx, slen, m->str_flags);
if (v == 0) { /* found match */
ms->search.offset += idx;
break;
}
}
break;
}
case FILE_REGEX: {
int rc;
file_regex_t rx;
if (ms->search.s == NULL)
return 0;
l = 0;
rc = file_regcomp(&rx, m->value.s,
REG_EXTENDED|REG_NEWLINE|
((m->str_flags & STRING_IGNORE_CASE) ? REG_ICASE : 0));
if (rc) {
file_regerror(&rx, rc, ms);
v = (uint64_t)-1;
} else {
#ifndef REG_STARTEND
char c;
#endif
regmatch_t pmatch[1];
size_t slen = ms->search.s_len;
/* Limit by offset if requested */
if (m->str_range > 0)
slen = MIN(slen, m->str_range);
#ifndef REG_STARTEND
#define REG_STARTEND 0
if (slen != 0)
slen--;
c = ms->search.s[slen];
((char *)(intptr_t)ms->search.s)[slen] = '\0';
#else
pmatch[0].rm_so = 0;
pmatch[0].rm_eo = slen;
#endif
rc = file_regexec(&rx, (const char *)ms->search.s,
1, pmatch, REG_STARTEND);
#if REG_STARTEND == 0
((char *)(intptr_t)ms->search.s)[l] = c;
#endif
switch (rc) {
case 0:
ms->search.s += (int)pmatch[0].rm_so;
ms->search.offset += (size_t)pmatch[0].rm_so;
ms->search.rm_len =
(size_t)(pmatch[0].rm_eo - pmatch[0].rm_so);
v = 0;
break;
case REG_NOMATCH:
v = 1;
break;
default:
file_regerror(&rx, rc, ms);
v = (uint64_t)-1;
break;
}
}
file_regfree(&rx);
if (v == (uint64_t)-1)
return -1;
break;
}
case FILE_INDIRECT:
case FILE_USE:
case FILE_NAME:
return 1;
default:
file_magerror(ms, "invalid type %d in magiccheck()", m->type);
return -1;
}
v = file_signextend(ms, m, v);
switch (m->reln) {
case 'x':
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT
"u == *any* = 1\n", (unsigned long long)v);
matched = 1;
break;
case '!':
matched = v != l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT "u != %"
INT64_T_FORMAT "u = %d\n", (unsigned long long)v,
(unsigned long long)l, matched);
break;
case '=':
matched = v == l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT "u == %"
INT64_T_FORMAT "u = %d\n", (unsigned long long)v,
(unsigned long long)l, matched);
break;
case '>':
if (m->flag & UNSIGNED) {
matched = v > l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT
"u > %" INT64_T_FORMAT "u = %d\n",
(unsigned long long)v,
(unsigned long long)l, matched);
}
else {
matched = (int64_t) v > (int64_t) l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT
"d > %" INT64_T_FORMAT "d = %d\n",
(long long)v, (long long)l, matched);
}
break;
case '<':
if (m->flag & UNSIGNED) {
matched = v < l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT
"u < %" INT64_T_FORMAT "u = %d\n",
(unsigned long long)v,
(unsigned long long)l, matched);
}
else {
matched = (int64_t) v < (int64_t) l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT
"d < %" INT64_T_FORMAT "d = %d\n",
(long long)v, (long long)l, matched);
}
break;
case '&':
matched = (v & l) == l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %"
INT64_T_FORMAT "x) == %" INT64_T_FORMAT
"x) = %d\n", (unsigned long long)v,
(unsigned long long)l, (unsigned long long)l,
matched);
break;
case '^':
matched = (v & l) != l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %"
INT64_T_FORMAT "x) != %" INT64_T_FORMAT
"x) = %d\n", (unsigned long long)v,
(unsigned long long)l, (unsigned long long)l,
matched);
break;
default:
file_magerror(ms, "cannot happen: invalid relation `%c'",
m->reln);
return -1;
}
return matched;
}
Commit Message: * Enforce limit of 8K on regex searches that have no limits
* Allow the l modifier for regex to mean line count. Default
to byte count. If line count is specified, assume a max
of 80 characters per line to limit the byte count (see the sketch below).
* Don't allow conversions to be used for dates, allowing
the mask field to be used as an offset.
* Bump the version of the magic format so that regex changes
are visible.
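The first two bullets amount to deriving a byte window for the regex search. A sketch of that derivation (illustrative only; the helper name, the flag parameter and the exact clamping site inside file(1) are assumptions):
#include <stddef.h>
#include <stdio.h>
#define REGEX_DEFAULT_BYTES (8 * 1024)  /* hard cap when a rule gives no limit */
#define BYTES_PER_LINE      80          /* assumed maximum characters per line */
/* Illustrative helper, not file(1)'s actual code: turn a rule's range
 * into the number of bytes the regex is allowed to scan. */
static size_t regex_window(size_t buf_len, size_t range, int range_is_lines)
{
	size_t window;
	if (range == 0)
		window = REGEX_DEFAULT_BYTES;     /* unbounded rule: enforce 8K */
	else if (range_is_lines)
		window = range * BYTES_PER_LINE;  /* "l" modifier: lines -> bytes */
	else
		window = range;                   /* default: range is a byte count */
	return window < buf_len ? window : buf_len;
}
int main(void)
{
	printf("%zu %zu\n", regex_window(65536, 0, 0), regex_window(65536, 10, 1));
	return 0;
}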
CWE ID: CWE-399 | magiccheck(struct magic_set *ms, struct magic *m)
{
uint64_t l = m->value.q;
uint64_t v;
float fl, fv;
double dl, dv;
int matched;
union VALUETYPE *p = &ms->ms_value;
switch (m->type) {
case FILE_BYTE:
v = p->b;
break;
case FILE_SHORT:
case FILE_BESHORT:
case FILE_LESHORT:
v = p->h;
break;
case FILE_LONG:
case FILE_BELONG:
case FILE_LELONG:
case FILE_MELONG:
case FILE_DATE:
case FILE_BEDATE:
case FILE_LEDATE:
case FILE_MEDATE:
case FILE_LDATE:
case FILE_BELDATE:
case FILE_LELDATE:
case FILE_MELDATE:
v = p->l;
break;
case FILE_QUAD:
case FILE_LEQUAD:
case FILE_BEQUAD:
case FILE_QDATE:
case FILE_BEQDATE:
case FILE_LEQDATE:
case FILE_QLDATE:
case FILE_BEQLDATE:
case FILE_LEQLDATE:
case FILE_QWDATE:
case FILE_BEQWDATE:
case FILE_LEQWDATE:
v = p->q;
break;
case FILE_FLOAT:
case FILE_BEFLOAT:
case FILE_LEFLOAT:
fl = m->value.f;
fv = p->f;
switch (m->reln) {
case 'x':
matched = 1;
break;
case '!':
matched = fv != fl;
break;
case '=':
matched = fv == fl;
break;
case '>':
matched = fv > fl;
break;
case '<':
matched = fv < fl;
break;
default:
file_magerror(ms, "cannot happen with float: invalid relation `%c'",
m->reln);
return -1;
}
return matched;
case FILE_DOUBLE:
case FILE_BEDOUBLE:
case FILE_LEDOUBLE:
dl = m->value.d;
dv = p->d;
switch (m->reln) {
case 'x':
matched = 1;
break;
case '!':
matched = dv != dl;
break;
case '=':
matched = dv == dl;
break;
case '>':
matched = dv > dl;
break;
case '<':
matched = dv < dl;
break;
default:
file_magerror(ms, "cannot happen with double: invalid relation `%c'", m->reln);
return -1;
}
return matched;
case FILE_DEFAULT:
case FILE_CLEAR:
l = 0;
v = 0;
break;
case FILE_STRING:
case FILE_PSTRING:
l = 0;
v = file_strncmp(m->value.s, p->s, (size_t)m->vallen, m->str_flags);
break;
case FILE_BESTRING16:
case FILE_LESTRING16:
l = 0;
v = file_strncmp16(m->value.s, p->s, (size_t)m->vallen, m->str_flags);
break;
case FILE_SEARCH: { /* search ms->search.s for the string m->value.s */
size_t slen;
size_t idx;
if (ms->search.s == NULL)
return 0;
slen = MIN(m->vallen, sizeof(m->value.s));
l = 0;
v = 0;
for (idx = 0; m->str_range == 0 || idx < m->str_range; idx++) {
if (slen + idx > ms->search.s_len)
break;
v = file_strncmp(m->value.s, ms->search.s + idx, slen,
m->str_flags);
if (v == 0) { /* found match */
ms->search.offset += idx;
break;
}
}
break;
}
case FILE_REGEX: {
int rc;
file_regex_t rx;
if (ms->search.s == NULL)
return 0;
l = 0;
rc = file_regcomp(&rx, m->value.s,
REG_EXTENDED|REG_NEWLINE|
((m->str_flags & STRING_IGNORE_CASE) ? REG_ICASE : 0));
if (rc) {
file_regerror(&rx, rc, ms);
v = (uint64_t)-1;
} else {
regmatch_t pmatch[1];
size_t slen = ms->search.s_len;
#ifndef REG_STARTEND
#define REG_STARTEND 0
char c;
if (slen != 0)
slen--;
c = ms->search.s[slen];
((char *)(intptr_t)ms->search.s)[slen] = '\0';
#else
pmatch[0].rm_so = 0;
pmatch[0].rm_eo = slen;
#endif
rc = file_regexec(&rx, (const char *)ms->search.s,
1, pmatch, REG_STARTEND);
#if REG_STARTEND == 0
((char *)(intptr_t)ms->search.s)[l] = c;
#endif
switch (rc) {
case 0:
ms->search.s += (int)pmatch[0].rm_so;
ms->search.offset += (size_t)pmatch[0].rm_so;
ms->search.rm_len =
(size_t)(pmatch[0].rm_eo - pmatch[0].rm_so);
v = 0;
break;
case REG_NOMATCH:
v = 1;
break;
default:
file_regerror(&rx, rc, ms);
v = (uint64_t)-1;
break;
}
}
file_regfree(&rx);
if (v == (uint64_t)-1)
return -1;
break;
}
case FILE_INDIRECT:
case FILE_USE:
case FILE_NAME:
return 1;
default:
file_magerror(ms, "invalid type %d in magiccheck()", m->type);
return -1;
}
v = file_signextend(ms, m, v);
switch (m->reln) {
case 'x':
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT
"u == *any* = 1\n", (unsigned long long)v);
matched = 1;
break;
case '!':
matched = v != l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT "u != %"
INT64_T_FORMAT "u = %d\n", (unsigned long long)v,
(unsigned long long)l, matched);
break;
case '=':
matched = v == l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT "u == %"
INT64_T_FORMAT "u = %d\n", (unsigned long long)v,
(unsigned long long)l, matched);
break;
case '>':
if (m->flag & UNSIGNED) {
matched = v > l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT
"u > %" INT64_T_FORMAT "u = %d\n",
(unsigned long long)v,
(unsigned long long)l, matched);
}
else {
matched = (int64_t) v > (int64_t) l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT
"d > %" INT64_T_FORMAT "d = %d\n",
(long long)v, (long long)l, matched);
}
break;
case '<':
if (m->flag & UNSIGNED) {
matched = v < l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT
"u < %" INT64_T_FORMAT "u = %d\n",
(unsigned long long)v,
(unsigned long long)l, matched);
}
else {
matched = (int64_t) v < (int64_t) l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "%" INT64_T_FORMAT
"d < %" INT64_T_FORMAT "d = %d\n",
(long long)v, (long long)l, matched);
}
break;
case '&':
matched = (v & l) == l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %"
INT64_T_FORMAT "x) == %" INT64_T_FORMAT
"x) = %d\n", (unsigned long long)v,
(unsigned long long)l, (unsigned long long)l,
matched);
break;
case '^':
matched = (v & l) != l;
if ((ms->flags & MAGIC_DEBUG) != 0)
(void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %"
INT64_T_FORMAT "x) != %" INT64_T_FORMAT
"x) = %d\n", (unsigned long long)v,
(unsigned long long)l, (unsigned long long)l,
matched);
break;
default:
file_magerror(ms, "cannot happen: invalid relation `%c'",
m->reln);
return -1;
}
return matched;
}
| 166,357 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
{
int i;
uint32_t txr_len_log2, rxr_len_log2;
uint32_t req_ring_size, cmp_ring_size;
m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT;
if ((ri->reqRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
|| (ri->cmpRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)) {
return -1;
}
req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
txr_len_log2 = pvscsi_log2(req_ring_size - 1);
}
Commit Message:
CWE ID: CWE-125 | pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
{
int i;
uint32_t txr_len_log2, rxr_len_log2;
uint32_t req_ring_size, cmp_ring_size;
m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT;
req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
txr_len_log2 = pvscsi_log2(req_ring_size - 1);
}
| 164,937 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id)
{
struct device *idev = &interface->dev;
struct usb_device *udev = interface_to_usbdev(interface);
struct lego_usb_tower *dev = NULL;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor* endpoint;
struct tower_get_version_reply get_version_reply;
int i;
int retval = -ENOMEM;
int result;
/* allocate memory for our device state and initialize it */
dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL);
if (!dev)
goto exit;
mutex_init(&dev->lock);
dev->udev = udev;
dev->open_count = 0;
dev->read_buffer = NULL;
dev->read_buffer_length = 0;
dev->read_packet_length = 0;
spin_lock_init (&dev->read_buffer_lock);
dev->packet_timeout_jiffies = msecs_to_jiffies(packet_timeout);
dev->read_last_arrival = jiffies;
init_waitqueue_head (&dev->read_wait);
init_waitqueue_head (&dev->write_wait);
dev->interrupt_in_buffer = NULL;
dev->interrupt_in_endpoint = NULL;
dev->interrupt_in_urb = NULL;
dev->interrupt_in_running = 0;
dev->interrupt_in_done = 0;
dev->interrupt_out_buffer = NULL;
dev->interrupt_out_endpoint = NULL;
dev->interrupt_out_urb = NULL;
dev->interrupt_out_busy = 0;
iface_desc = interface->cur_altsetting;
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (usb_endpoint_xfer_int(endpoint)) {
if (usb_endpoint_dir_in(endpoint))
dev->interrupt_in_endpoint = endpoint;
else
dev->interrupt_out_endpoint = endpoint;
}
}
if(dev->interrupt_in_endpoint == NULL) {
dev_err(idev, "interrupt in endpoint not found\n");
goto error;
}
if (dev->interrupt_out_endpoint == NULL) {
dev_err(idev, "interrupt out endpoint not found\n");
goto error;
}
dev->read_buffer = kmalloc (read_buffer_size, GFP_KERNEL);
if (!dev->read_buffer)
goto error;
dev->interrupt_in_buffer = kmalloc (usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
if (!dev->interrupt_in_buffer)
goto error;
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb)
goto error;
dev->interrupt_out_buffer = kmalloc (write_buffer_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer)
goto error;
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_out_urb)
goto error;
dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
/* we can register the device now, as it is ready */
usb_set_intfdata (interface, dev);
retval = usb_register_dev (interface, &tower_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(idev, "Not able to get a minor for this device.\n");
usb_set_intfdata (interface, NULL);
goto error;
}
dev->minor = interface->minor;
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
"%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
USB_MAJOR, dev->minor);
/* get the firmware version and log it */
result = usb_control_msg (udev,
usb_rcvctrlpipe(udev, 0),
LEGO_USB_TOWER_REQUEST_GET_VERSION,
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
0,
0,
&get_version_reply,
sizeof(get_version_reply),
1000);
if (result < 0) {
dev_err(idev, "LEGO USB Tower get version control request failed\n");
retval = result;
goto error;
}
dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
"build %d\n", get_version_reply.major,
get_version_reply.minor,
le16_to_cpu(get_version_reply.build_no));
exit:
return retval;
error:
tower_delete(dev);
return retval;
}
Commit Message: usb: misc: legousbtower: Fix NULL pointer deference
This patch fixes a NULL pointer dereference caused by a race condition in
the probe function of the legousbtower driver. It re-structures the
probe function to only register the interface after successfully reading
the board's firmware ID.
The probe function does not deregister the usb interface after an error
receiving the device's firmware ID. The device file registered
(/dev/usb/legousbtower%d) may be read/written globally before the probe
function returns. When tower_delete is called in the probe function
(after an r/w has been initiated), core dev structures are deleted while
the file operation functions are still running. If the 0 address is
mappable on the machine, this vulnerability can be used to create a
Local Priviege Escalation exploit via a write-what-where condition by
remapping dev->interrupt_out_buffer in tower_write. A forged USB device
and local program execution would be required for LPE. The USB device
would have to delay the control message in tower_probe and accept
the control urb in tower_open whilst guest code initiated a write to the
device file as tower_delete is called from the error in tower_probe.
This bug has existed since 2003. Patch tested by emulated device.
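The fix is an instance of a general driver rule: a device node must not become visible to user space until every fallible initialization step has completed. A toy user-space model of the reordering (the helpers are stand-ins, not the driver's functions):
#include <stdio.h>
struct toy_dev { int exposed; };
/* Stand-ins for the real steps; query_hardware() models the step that
 * can fail late (the GET_VERSION control message in this driver). */
static int  query_hardware(struct toy_dev *d) { (void)d; return 0; }
static void expose_to_userspace(struct toy_dev *d) { d->exposed = 1; }
static int probe(struct toy_dev *d)
{
	if (query_hardware(d) != 0)
		return -1;              /* fail while still invisible: no race */
	expose_to_userspace(d);     /* registration is now the last step */
	return 0;
}
int main(void)
{
	struct toy_dev d = { 0 };
	printf("probe: %d, exposed: %d\n", probe(&d), d.exposed);
	return 0;
}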
Reported-by: James Patrick-Evans <[email protected]>
Tested-by: James Patrick-Evans <[email protected]>
Signed-off-by: James Patrick-Evans <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
CWE ID: CWE-476 | static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id)
{
struct device *idev = &interface->dev;
struct usb_device *udev = interface_to_usbdev(interface);
struct lego_usb_tower *dev = NULL;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor* endpoint;
struct tower_get_version_reply get_version_reply;
int i;
int retval = -ENOMEM;
int result;
/* allocate memory for our device state and initialize it */
dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL);
if (!dev)
goto exit;
mutex_init(&dev->lock);
dev->udev = udev;
dev->open_count = 0;
dev->read_buffer = NULL;
dev->read_buffer_length = 0;
dev->read_packet_length = 0;
spin_lock_init (&dev->read_buffer_lock);
dev->packet_timeout_jiffies = msecs_to_jiffies(packet_timeout);
dev->read_last_arrival = jiffies;
init_waitqueue_head (&dev->read_wait);
init_waitqueue_head (&dev->write_wait);
dev->interrupt_in_buffer = NULL;
dev->interrupt_in_endpoint = NULL;
dev->interrupt_in_urb = NULL;
dev->interrupt_in_running = 0;
dev->interrupt_in_done = 0;
dev->interrupt_out_buffer = NULL;
dev->interrupt_out_endpoint = NULL;
dev->interrupt_out_urb = NULL;
dev->interrupt_out_busy = 0;
iface_desc = interface->cur_altsetting;
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
if (usb_endpoint_xfer_int(endpoint)) {
if (usb_endpoint_dir_in(endpoint))
dev->interrupt_in_endpoint = endpoint;
else
dev->interrupt_out_endpoint = endpoint;
}
}
if(dev->interrupt_in_endpoint == NULL) {
dev_err(idev, "interrupt in endpoint not found\n");
goto error;
}
if (dev->interrupt_out_endpoint == NULL) {
dev_err(idev, "interrupt out endpoint not found\n");
goto error;
}
dev->read_buffer = kmalloc (read_buffer_size, GFP_KERNEL);
if (!dev->read_buffer)
goto error;
dev->interrupt_in_buffer = kmalloc (usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
if (!dev->interrupt_in_buffer)
goto error;
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb)
goto error;
dev->interrupt_out_buffer = kmalloc (write_buffer_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer)
goto error;
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_out_urb)
goto error;
dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
/* get the firmware version and log it */
result = usb_control_msg (udev,
usb_rcvctrlpipe(udev, 0),
LEGO_USB_TOWER_REQUEST_GET_VERSION,
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
0,
0,
&get_version_reply,
sizeof(get_version_reply),
1000);
if (result < 0) {
dev_err(idev, "LEGO USB Tower get version control request failed\n");
retval = result;
goto error;
}
dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
"build %d\n", get_version_reply.major,
get_version_reply.minor,
le16_to_cpu(get_version_reply.build_no));
/* we can register the device now, as it is ready */
usb_set_intfdata (interface, dev);
retval = usb_register_dev (interface, &tower_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(idev, "Not able to get a minor for this device.\n");
usb_set_intfdata (interface, NULL);
goto error;
}
dev->minor = interface->minor;
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
"%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
USB_MAJOR, dev->minor);
exit:
return retval;
error:
tower_delete(dev);
return retval;
}
| 167,737 |
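The commit message above hinges on one ordering rule: nothing user-visible may exist until every fallible init step has finished. A minimal C sketch of that rule, using hypothetical stand-ins (read_firmware_id, register_node) rather than real kernel APIs:

/* Sketch only: probe completes all fallible work before the device
 * node becomes visible, so user space can never race a half-built state. */
#include <stdio.h>

struct dev_state { int fw_major, fw_minor; int ready; };

static int read_firmware_id(struct dev_state *d)
{
    d->fw_major = 1;            /* pretend the hardware answered */
    d->fw_minor = 0;
    return 0;                   /* 0 = success, <0 = failure */
}

static void register_node(struct dev_state *d)
{
    d->ready = 1;               /* only now may open()/write() run */
    printf("node registered, fw %d.%d\n", d->fw_major, d->fw_minor);
}

int probe(struct dev_state *d)
{
    if (read_firmware_id(d) < 0)
        return -1;              /* nothing visible yet, nothing to tear down */
    register_node(d);
    return 0;
}

int main(void) { struct dev_state d = {0}; return probe(&d); }

With the vulnerable ordering, a failure after registration forces a teardown that races any already-opened file handle; with this ordering the error path frees only private state.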
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: MagickExport void *ResizeQuantumMemory(void *memory,const size_t count,
const size_t quantum)
{
size_t
extent;
if (CheckMemoryOverflow(count,quantum) != MagickFalse)
{
memory=RelinquishMagickMemory(memory);
return((void *) NULL);
}
extent=count*quantum;
return(ResizeMagickMemory(memory,extent));
}
Commit Message: Suspend exception processing if there are too many exceptions
CWE ID: CWE-119 | MagickExport void *ResizeQuantumMemory(void *memory,const size_t count,
const size_t quantum)
{
size_t
extent;
if (HeapOverflowSanityCheck(count,quantum) != MagickFalse)
{
memory=RelinquishMagickMemory(memory);
return((void *) NULL);
}
extent=count*quantum;
return(ResizeMagickMemory(memory,extent));
}
| 168,545 |
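The fix above swaps one overflow guard for another, but the underlying pattern is worth spelling out: validate count*quantum before multiplying, because the product silently wraps in size_t. A hedged C sketch (resize_checked is an illustrative name, not ImageMagick's API):

#include <stdint.h>
#include <stdlib.h>

void *resize_checked(void *mem, size_t count, size_t quantum)
{
    /* count * quantum would wrap iff count > SIZE_MAX / quantum */
    if (quantum != 0 && count > SIZE_MAX / quantum) {
        free(mem);              /* mirror the record: release on failure */
        return NULL;
    }
    return realloc(mem, count * quantum);
}

The division-based test is exact and avoids needing a wider intermediate type.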
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int __mkroute_input(struct sk_buff *skb,
const struct fib_result *res,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
{
struct fib_nh_exception *fnhe;
struct rtable *rth;
int err;
struct in_device *out_dev;
unsigned int flags = 0;
bool do_cache;
u32 itag = 0;
/* get a working reference to the output device */
out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
if (out_dev == NULL) {
net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
return -EINVAL;
}
err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
in_dev->dev, in_dev, &itag);
if (err < 0) {
ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);
goto cleanup;
}
do_cache = res->fi && !itag;
if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
flags |= RTCF_DOREDIRECT;
do_cache = false;
}
if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
* invalid for proxy arp. DNAT routes are always valid.
*
* Proxy arp feature have been extended to allow, ARP
* replies back to the same interface, to support
* Private VLAN switch technologies. See arp.c.
*/
if (out_dev == in_dev &&
IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
err = -EINVAL;
goto cleanup;
}
}
fnhe = find_exception(&FIB_RES_NH(*res), daddr);
if (do_cache) {
if (fnhe != NULL)
rth = rcu_dereference(fnhe->fnhe_rth_input);
else
rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
goto out;
}
}
rth = rt_dst_alloc(out_dev->dev,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
if (!rth) {
err = -ENOBUFS;
goto cleanup;
}
rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
rth->rt_flags = flags;
rth->rt_type = res->type;
rth->rt_is_input = 1;
rth->rt_iif = 0;
rth->rt_pmtu = 0;
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
RT_CACHE_STAT_INC(in_slow_tot);
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
skb_dst_set(skb, &rth->dst);
out:
err = 0;
cleanup:
return err;
}
Commit Message: ipv4: try to cache dst_entries which would cause a redirect
Not caching dst_entries which cause redirects could be exploited by hosts
on the same subnet, causing a severe DoS attack. This effect aggravated
since commit f88649721268999 ("ipv4: fix dst race in sk_dst_get()").
Lookups causing redirects will be allocated with DST_NOCACHE set which
will force dst_release to free them via RCU. Unfortunately waiting for
RCU grace period just takes too long, we can end up with >1M dst_entries
waiting to be released and the system will run OOM. rcuos threads cannot
catch up under high softirq load.
Attaching the flag to emit a redirect later on to the specific skb allows
us to cache those dst_entries thus reducing the pressure on allocation
and deallocation.
This issue was discovered by Marcelo Leitner.
Cc: Julian Anastasov <[email protected]>
Signed-off-by: Marcelo Leitner <[email protected]>
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: Julian Anastasov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-17 | static int __mkroute_input(struct sk_buff *skb,
const struct fib_result *res,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
{
struct fib_nh_exception *fnhe;
struct rtable *rth;
int err;
struct in_device *out_dev;
unsigned int flags = 0;
bool do_cache;
u32 itag = 0;
/* get a working reference to the output device */
out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
if (out_dev == NULL) {
net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
return -EINVAL;
}
err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
in_dev->dev, in_dev, &itag);
if (err < 0) {
ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);
goto cleanup;
}
do_cache = res->fi && !itag;
if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
skb->protocol == htons(ETH_P_IP) &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
IPCB(skb)->flags |= IPSKB_DOREDIRECT;
if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
* invalid for proxy arp. DNAT routes are always valid.
*
* Proxy arp feature have been extended to allow, ARP
* replies back to the same interface, to support
* Private VLAN switch technologies. See arp.c.
*/
if (out_dev == in_dev &&
IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
err = -EINVAL;
goto cleanup;
}
}
fnhe = find_exception(&FIB_RES_NH(*res), daddr);
if (do_cache) {
if (fnhe != NULL)
rth = rcu_dereference(fnhe->fnhe_rth_input);
else
rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
goto out;
}
}
rth = rt_dst_alloc(out_dev->dev,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
if (!rth) {
err = -ENOBUFS;
goto cleanup;
}
rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
rth->rt_flags = flags;
rth->rt_type = res->type;
rth->rt_is_input = 1;
rth->rt_iif = 0;
rth->rt_pmtu = 0;
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
RT_CACHE_STAT_INC(in_slow_tot);
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
skb_dst_set(skb, &rth->dst);
out:
err = 0;
cleanup:
return err;
}
| 166,698 |
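The design change in the record above is a general one: state that only some packets need (the redirect decision) moves from the shared, cached route object onto the per-packet control block, so the route stays cacheable. A toy C sketch under assumed types (struct route and struct packet are not the kernel's):

#include <stdint.h>

#define PKT_DOREDIRECT 0x01u    /* analogue of IPSKB_DOREDIRECT */

struct route  { int cacheable; };       /* shared by many packets */
struct packet { struct route *rt; uint8_t flags; };

void classify(struct packet *p, int wants_redirect)
{
    if (wants_redirect)
        p->flags |= PKT_DOREDIRECT;     /* per-packet, not per-route */
    p->rt->cacheable = 1;               /* route can be cached either way */
}

int should_emit_redirect(const struct packet *p)
{
    return (p->flags & PKT_DOREDIRECT) != 0;
}

Because the redirect intent no longer poisons the cached route, hosts on the same subnet can no longer force a flood of uncacheable DST_NOCACHE entries.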
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image *image, *image2=NULL,
*rotated_image;
PixelPacket *q;
unsigned int status;
MATHeader MATLAB_HDR;
size_t size;
size_t CellType;
QuantumInfo *quantum_info;
ImageInfo *clone_info;
int i;
ssize_t ldblk;
unsigned char *BImgBuff = NULL;
double MinVal, MaxVal;
size_t Unknown6;
unsigned z, z2;
unsigned Frames;
int logging;
int sample_size;
MagickOffsetType filepos=0x80;
BlobInfo *blob;
size_t one;
unsigned int (*ReadBlobXXXLong)(Image *image);
unsigned short (*ReadBlobXXXShort)(Image *image);
void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data);
void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data);
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
logging = LogMagickEvent(CoderEvent,GetMagickModule(),"enter");
/*
Open image file.
*/
image = AcquireImage(image_info);
status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read MATLAB image.
*/
clone_info=CloneImageInfo(image_info);
if(ReadBlob(image,124,(unsigned char *) &MATLAB_HDR.identific) != 124)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
MATLAB_HDR.Version = ReadBlobLSBShort(image);
if(ReadBlob(image,2,(unsigned char *) &MATLAB_HDR.EndianIndicator) != 2)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule()," Endian %c%c",
MATLAB_HDR.EndianIndicator[0],MATLAB_HDR.EndianIndicator[1]);
if (!strncmp(MATLAB_HDR.EndianIndicator, "IM", 2))
{
ReadBlobXXXLong = ReadBlobLSBLong;
ReadBlobXXXShort = ReadBlobLSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesLSB;
ReadBlobFloatsXXX = ReadBlobFloatsLSB;
image->endian = LSBEndian;
}
else if (!strncmp(MATLAB_HDR.EndianIndicator, "MI", 2))
{
ReadBlobXXXLong = ReadBlobMSBLong;
ReadBlobXXXShort = ReadBlobMSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesMSB;
ReadBlobFloatsXXX = ReadBlobFloatsMSB;
image->endian = MSBEndian;
}
else
goto MATLAB_KO; /* unsupported endian */
if (strncmp(MATLAB_HDR.identific, "MATLAB", 6))
MATLAB_KO: ThrowReaderException(CorruptImageError,"ImproperImageHeader");
filepos = TellBlob(image);
while(!EOFBlob(image)) /* object parser loop */
{
Frames = 1;
(void) SeekBlob(image,filepos,SEEK_SET);
/* printf("pos=%X\n",TellBlob(image)); */
MATLAB_HDR.DataType = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
MATLAB_HDR.ObjectSize = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
filepos += MATLAB_HDR.ObjectSize + 4 + 4;
image2 = image;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if(MATLAB_HDR.DataType == miCOMPRESSED)
{
image2 = DecompressBlock(image,MATLAB_HDR.ObjectSize,clone_info,exception);
if(image2==NULL) continue;
MATLAB_HDR.DataType = ReadBlobXXXLong(image2); /* replace compressed object type. */
}
#endif
if(MATLAB_HDR.DataType!=miMATRIX) continue; /* skip another objects. */
MATLAB_HDR.unknown1 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown2 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown5 = ReadBlobXXXLong(image2);
MATLAB_HDR.StructureClass = MATLAB_HDR.unknown5 & 0xFF;
MATLAB_HDR.StructureFlag = (MATLAB_HDR.unknown5>>8) & 0xFF;
MATLAB_HDR.unknown3 = ReadBlobXXXLong(image2);
if(image!=image2)
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); /* ??? don't understand why ?? */
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2);
MATLAB_HDR.DimFlag = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeX = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeY = ReadBlobXXXLong(image2);
switch(MATLAB_HDR.DimFlag)
{
case 8: z2=z=1; break; /* 2D matrix*/
case 12: z2=z = ReadBlobXXXLong(image2); /* 3D matrix RGB*/
Unknown6 = ReadBlobXXXLong(image2);
(void) Unknown6;
if(z!=3) ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
break;
case 16: z2=z = ReadBlobXXXLong(image2); /* 4D matrix animation */
if(z!=3 && z!=1)
ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
Frames = ReadBlobXXXLong(image2);
break;
default: ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
}
MATLAB_HDR.Flag1 = ReadBlobXXXShort(image2);
MATLAB_HDR.NameFlag = ReadBlobXXXShort(image2);
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.StructureClass %d",MATLAB_HDR.StructureClass);
if (MATLAB_HDR.StructureClass != mxCHAR_CLASS &&
MATLAB_HDR.StructureClass != mxSINGLE_CLASS && /* float + complex float */
MATLAB_HDR.StructureClass != mxDOUBLE_CLASS && /* double + complex double */
MATLAB_HDR.StructureClass != mxINT8_CLASS &&
MATLAB_HDR.StructureClass != mxUINT8_CLASS && /* uint8 + uint8 3D */
MATLAB_HDR.StructureClass != mxINT16_CLASS &&
MATLAB_HDR.StructureClass != mxUINT16_CLASS && /* uint16 + uint16 3D */
MATLAB_HDR.StructureClass != mxINT32_CLASS &&
MATLAB_HDR.StructureClass != mxUINT32_CLASS && /* uint32 + uint32 3D */
MATLAB_HDR.StructureClass != mxINT64_CLASS &&
MATLAB_HDR.StructureClass != mxUINT64_CLASS) /* uint64 + uint64 3D */
ThrowReaderException(CoderError,"UnsupportedCellTypeInTheMatrix");
switch (MATLAB_HDR.NameFlag)
{
case 0:
size = ReadBlobXXXLong(image2); /* Object name string size */
size = 4 * (ssize_t) ((size + 3 + 1) / 4);
(void) SeekBlob(image2, size, SEEK_CUR);
break;
case 1:
case 2:
case 3:
case 4:
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* Object name string */
break;
default:
goto MATLAB_KO;
}
CellType = ReadBlobXXXLong(image2); /* Additional object type */
if (logging)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.CellType: %.20g",(double) CellType);
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* data size */
NEXT_FRAME:
switch (CellType)
{
case miINT8:
case miUINT8:
sample_size = 8;
if(MATLAB_HDR.StructureFlag & FLAG_LOGICAL)
image->depth = 1;
else
image->depth = 8; /* Byte type cell */
ldblk = (ssize_t) MATLAB_HDR.SizeX;
break;
case miINT16:
case miUINT16:
sample_size = 16;
image->depth = 16; /* Word type cell */
ldblk = (ssize_t) (2 * MATLAB_HDR.SizeX);
break;
case miINT32:
case miUINT32:
sample_size = 32;
image->depth = 32; /* Dword type cell */
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miINT64:
case miUINT64:
sample_size = 64;
image->depth = 64; /* Qword type cell */
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
case miSINGLE:
sample_size = 32;
image->depth = 32; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex float type cell */
}
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miDOUBLE:
sample_size = 64;
image->depth = 64; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
DisableMSCWarning(4127)
if (sizeof(double) != 8)
RestoreMSCWarning
ThrowReaderException(CoderError, "IncompatibleSizeOfDouble");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex double type cell */
}
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
default:
ThrowReaderException(CoderError, "UnsupportedCellTypeInTheMatrix");
}
(void) sample_size;
image->columns = MATLAB_HDR.SizeX;
image->rows = MATLAB_HDR.SizeY;
quantum_info=AcquireQuantumInfo(clone_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
one=1;
image->colors = one << image->depth;
if (image->columns == 0 || image->rows == 0)
goto MATLAB_KO;
/* Image is gray when no complex flag is set and 2D Matrix */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
{
SetImageColorspace(image,GRAYColorspace);
image->type=GrayscaleType;
}
/*
If ping is true, then only set image size and colors without
reading any image data.
*/
if (image_info->ping)
{
size_t temp = image->columns;
image->columns = image->rows;
image->rows = temp;
goto done_reading; /* !!!!!! BAD !!!! */
}
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
/* ----- Load raster data ----- */
BImgBuff = (unsigned char *) AcquireQuantumMemory((size_t) (ldblk),sizeof(double)); /* Ldblk was set in the check phase */
if (BImgBuff == NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
MinVal = 0;
MaxVal = 0;
if (CellType==miDOUBLE || CellType==miSINGLE) /* Find Min and Max Values for floats */
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &quantum_info->minimum, &quantum_info->maximum);
}
/* Main loop for reading all scanlines */
if(z==1) z=0; /* read grey scanlines */
/* else read color scanlines */
do
{
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
q=GetAuthenticPixels(image,0,MATLAB_HDR.SizeY-i-1,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT set image pixels returns unexpected NULL on a row %u.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto done_reading; /* Skip image rotation, when cannot set image pixels */
}
if(ReadBlob(image2,ldblk,(unsigned char *)BImgBuff) != (ssize_t) ldblk)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT cannot read scanrow %u from a file.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
if((CellType==miINT8 || CellType==miUINT8) && (MATLAB_HDR.StructureFlag & FLAG_LOGICAL))
{
FixLogical((unsigned char *)BImgBuff,ldblk);
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
{
ImportQuantumPixelsFailed:
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to ImportQuantumPixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
break;
}
}
else
{
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
goto ImportQuantumPixelsFailed;
if (z<=1 && /* fix only during a last pass z==0 || z==1 */
(CellType==miINT8 || CellType==miINT16 || CellType==miINT32 || CellType==miINT64))
FixSignedValues(q,MATLAB_HDR.SizeX);
}
if (!SyncAuthenticPixels(image,exception))
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to sync image pixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
}
} while(z-- >= 2);
ExitLoop:
/* Read complex part of numbers here */
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* Find Min and Max Values for complex parts of floats */
CellType = ReadBlobXXXLong(image2); /* Additional object type */
i = ReadBlobXXXLong(image2); /* size of a complex part - toss away*/
if (CellType==miDOUBLE || CellType==miSINGLE)
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &MinVal, &MaxVal);
}
if (CellType==miDOUBLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobDoublesXXX(image2, ldblk, (double *)BImgBuff);
InsertComplexDoubleRow((double *)BImgBuff, i, image, MinVal, MaxVal);
}
if (CellType==miSINGLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobFloatsXXX(image2, ldblk, (float *)BImgBuff);
InsertComplexFloatRow((float *)BImgBuff, i, image, MinVal, MaxVal);
}
}
/* Image is gray when no complex flag is set and 2D Matrix AGAIN!!! */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
image->type=GrayscaleType;
if (image->depth == 1)
image->type=BilevelType;
if(image2==image)
image2 = NULL; /* Remove shadow copy to an image before rotation. */
/* Rotate image. */
rotated_image = RotateImage(image, 90.0, exception);
if (rotated_image != (Image *) NULL)
{
/* Remove page offsets added by RotateImage */
rotated_image->page.x=0;
rotated_image->page.y=0;
blob = rotated_image->blob;
rotated_image->blob = image->blob;
rotated_image->colors = image->colors;
image->blob = blob;
AppendImageToList(&image,rotated_image);
DeleteImageFromList(&image);
}
done_reading:
if(image2!=NULL)
if(image2!=image)
{
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image);
if (image->next == (Image *) NULL) break;
image=SyncNextImageInList(image);
image->columns=image->rows=0;
image->colors=0;
/* row scan buffer is no longer needed */
RelinquishMagickMemory(BImgBuff);
BImgBuff = NULL;
if(--Frames>0)
{
z = z2;
if(image2==NULL) image2 = image;
goto NEXT_FRAME;
}
if(image2!=NULL)
if(image2!=image) /* Does shadow temporary decompressed image exist? */
{
/* CloseBlob(image2); */
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) unlink(clone_info->filename);
}
}
}
}
clone_info=DestroyImageInfo(clone_info);
RelinquishMagickMemory(BImgBuff);
CloseBlob(image);
{
Image *p;
ssize_t scene=0;
/*
Rewind list, removing any empty images while rewinding.
*/
p=image;
image=NULL;
while (p != (Image *) NULL)
{
Image *tmp=p;
if ((p->rows == 0) || (p->columns == 0)) {
p=p->previous;
DeleteImageFromList(&tmp);
} else {
image=p;
p=p->previous;
}
}
/*
Fix scene numbers
*/
for (p=image; p != (Image *) NULL; p=p->next)
p->scene=scene++;
}
if(clone_info != NULL) /* cleanup garbage file from compression */
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
DestroyImageInfo(clone_info);
clone_info = NULL;
}
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),"return");
if(image==NULL)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
return (image);
}
Commit Message: https://github.com/ImageMagick/ImageMagick/issues/131
CWE ID: CWE-125 | static Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image *image, *image2=NULL,
*rotated_image;
PixelPacket *q;
unsigned int status;
MATHeader MATLAB_HDR;
size_t size;
size_t CellType;
QuantumInfo *quantum_info;
ImageInfo *clone_info;
int i;
ssize_t ldblk;
unsigned char *BImgBuff = NULL;
double MinVal, MaxVal;
size_t Unknown6;
unsigned z, z2;
unsigned Frames;
int logging;
int sample_size;
MagickOffsetType filepos=0x80;
BlobInfo *blob;
size_t one;
unsigned int (*ReadBlobXXXLong)(Image *image);
unsigned short (*ReadBlobXXXShort)(Image *image);
void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data);
void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data);
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
logging = LogMagickEvent(CoderEvent,GetMagickModule(),"enter");
/*
Open image file.
*/
image = AcquireImage(image_info);
status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read MATLAB image.
*/
clone_info=CloneImageInfo(image_info);
if(ReadBlob(image,124,(unsigned char *) &MATLAB_HDR.identific) != 124)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
MATLAB_HDR.Version = ReadBlobLSBShort(image);
if(ReadBlob(image,2,(unsigned char *) &MATLAB_HDR.EndianIndicator) != 2)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule()," Endian %c%c",
MATLAB_HDR.EndianIndicator[0],MATLAB_HDR.EndianIndicator[1]);
if (!strncmp(MATLAB_HDR.EndianIndicator, "IM", 2))
{
ReadBlobXXXLong = ReadBlobLSBLong;
ReadBlobXXXShort = ReadBlobLSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesLSB;
ReadBlobFloatsXXX = ReadBlobFloatsLSB;
image->endian = LSBEndian;
}
else if (!strncmp(MATLAB_HDR.EndianIndicator, "MI", 2))
{
ReadBlobXXXLong = ReadBlobMSBLong;
ReadBlobXXXShort = ReadBlobMSBShort;
ReadBlobDoublesXXX = ReadBlobDoublesMSB;
ReadBlobFloatsXXX = ReadBlobFloatsMSB;
image->endian = MSBEndian;
}
else
goto MATLAB_KO; /* unsupported endian */
if (strncmp(MATLAB_HDR.identific, "MATLAB", 6))
MATLAB_KO: ThrowReaderException(CorruptImageError,"ImproperImageHeader");
filepos = TellBlob(image);
while(!EOFBlob(image)) /* object parser loop */
{
Frames = 1;
(void) SeekBlob(image,filepos,SEEK_SET);
/* printf("pos=%X\n",TellBlob(image)); */
MATLAB_HDR.DataType = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
MATLAB_HDR.ObjectSize = ReadBlobXXXLong(image);
if(EOFBlob(image)) break;
filepos += MATLAB_HDR.ObjectSize + 4 + 4;
image2 = image;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if(MATLAB_HDR.DataType == miCOMPRESSED)
{
image2 = DecompressBlock(image,MATLAB_HDR.ObjectSize,clone_info,exception);
if(image2==NULL) continue;
MATLAB_HDR.DataType = ReadBlobXXXLong(image2); /* replace compressed object type. */
}
#endif
if(MATLAB_HDR.DataType!=miMATRIX) continue; /* skip another objects. */
MATLAB_HDR.unknown1 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown2 = ReadBlobXXXLong(image2);
MATLAB_HDR.unknown5 = ReadBlobXXXLong(image2);
MATLAB_HDR.StructureClass = MATLAB_HDR.unknown5 & 0xFF;
MATLAB_HDR.StructureFlag = (MATLAB_HDR.unknown5>>8) & 0xFF;
MATLAB_HDR.unknown3 = ReadBlobXXXLong(image2);
if(image!=image2)
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); /* ??? don't understand why ?? */
MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2);
MATLAB_HDR.DimFlag = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeX = ReadBlobXXXLong(image2);
MATLAB_HDR.SizeY = ReadBlobXXXLong(image2);
switch(MATLAB_HDR.DimFlag)
{
case 8: z2=z=1; break; /* 2D matrix*/
case 12: z2=z = ReadBlobXXXLong(image2); /* 3D matrix RGB*/
Unknown6 = ReadBlobXXXLong(image2);
(void) Unknown6;
if(z!=3) ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
break;
case 16: z2=z = ReadBlobXXXLong(image2); /* 4D matrix animation */
if(z!=3 && z!=1)
ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
Frames = ReadBlobXXXLong(image2);
break;
default: ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported");
}
MATLAB_HDR.Flag1 = ReadBlobXXXShort(image2);
MATLAB_HDR.NameFlag = ReadBlobXXXShort(image2);
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.StructureClass %d",MATLAB_HDR.StructureClass);
if (MATLAB_HDR.StructureClass != mxCHAR_CLASS &&
MATLAB_HDR.StructureClass != mxSINGLE_CLASS && /* float + complex float */
MATLAB_HDR.StructureClass != mxDOUBLE_CLASS && /* double + complex double */
MATLAB_HDR.StructureClass != mxINT8_CLASS &&
MATLAB_HDR.StructureClass != mxUINT8_CLASS && /* uint8 + uint8 3D */
MATLAB_HDR.StructureClass != mxINT16_CLASS &&
MATLAB_HDR.StructureClass != mxUINT16_CLASS && /* uint16 + uint16 3D */
MATLAB_HDR.StructureClass != mxINT32_CLASS &&
MATLAB_HDR.StructureClass != mxUINT32_CLASS && /* uint32 + uint32 3D */
MATLAB_HDR.StructureClass != mxINT64_CLASS &&
MATLAB_HDR.StructureClass != mxUINT64_CLASS) /* uint64 + uint64 3D */
ThrowReaderException(CoderError,"UnsupportedCellTypeInTheMatrix");
switch (MATLAB_HDR.NameFlag)
{
case 0:
size = ReadBlobXXXLong(image2); /* Object name string size */
size = 4 * (ssize_t) ((size + 3 + 1) / 4);
(void) SeekBlob(image2, size, SEEK_CUR);
break;
case 1:
case 2:
case 3:
case 4:
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* Object name string */
break;
default:
goto MATLAB_KO;
}
CellType = ReadBlobXXXLong(image2); /* Additional object type */
if (logging)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"MATLAB_HDR.CellType: %.20g",(double) CellType);
(void) ReadBlob(image2, 4, (unsigned char *) &size); /* data size */
NEXT_FRAME:
switch (CellType)
{
case miINT8:
case miUINT8:
sample_size = 8;
if(MATLAB_HDR.StructureFlag & FLAG_LOGICAL)
image->depth = 1;
else
image->depth = 8; /* Byte type cell */
ldblk = (ssize_t) MATLAB_HDR.SizeX;
break;
case miINT16:
case miUINT16:
sample_size = 16;
image->depth = 16; /* Word type cell */
ldblk = (ssize_t) (2 * MATLAB_HDR.SizeX);
break;
case miINT32:
case miUINT32:
sample_size = 32;
image->depth = 32; /* Dword type cell */
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miINT64:
case miUINT64:
sample_size = 64;
image->depth = 64; /* Qword type cell */
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
case miSINGLE:
sample_size = 32;
image->depth = 32; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex float type cell */
}
ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
break;
case miDOUBLE:
sample_size = 64;
image->depth = 64; /* double type cell */
(void) SetImageOption(clone_info,"quantum:format","floating-point");
DisableMSCWarning(4127)
if (sizeof(double) != 8)
RestoreMSCWarning
ThrowReaderException(CoderError, "IncompatibleSizeOfDouble");
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* complex double type cell */
}
ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
break;
default:
ThrowReaderException(CoderError, "UnsupportedCellTypeInTheMatrix");
}
(void) sample_size;
image->columns = MATLAB_HDR.SizeX;
image->rows = MATLAB_HDR.SizeY;
quantum_info=AcquireQuantumInfo(clone_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
one=1;
image->colors = one << image->depth;
if (image->columns == 0 || image->rows == 0)
goto MATLAB_KO;
/* Image is gray when no complex flag is set and 2D Matrix */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
{
SetImageColorspace(image,GRAYColorspace);
image->type=GrayscaleType;
}
/*
If ping is true, then only set image size and colors without
reading any image data.
*/
if (image_info->ping)
{
size_t temp = image->columns;
image->columns = image->rows;
image->rows = temp;
goto done_reading; /* !!!!!! BAD !!!! */
}
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
/* ----- Load raster data ----- */
BImgBuff = (unsigned char *) AcquireQuantumMemory((size_t) (ldblk),sizeof(double)); /* Ldblk was set in the check phase */
if (BImgBuff == NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
MinVal = 0;
MaxVal = 0;
if (CellType==miDOUBLE || CellType==miSINGLE) /* Find Min and Max Values for floats */
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &quantum_info->minimum, &quantum_info->maximum);
}
/* Main loop for reading all scanlines */
if(z==1) z=0; /* read grey scanlines */
/* else read color scanlines */
do
{
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
q=GetAuthenticPixels(image,0,MATLAB_HDR.SizeY-i-1,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT set image pixels returns unexpected NULL on a row %u.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto done_reading; /* Skip image rotation, when cannot set image pixels */
}
if(ReadBlob(image2,ldblk,(unsigned char *)BImgBuff) != (ssize_t) ldblk)
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT cannot read scanrow %u from a file.", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
if((CellType==miINT8 || CellType==miUINT8) && (MATLAB_HDR.StructureFlag & FLAG_LOGICAL))
{
FixLogical((unsigned char *)BImgBuff,ldblk);
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
{
ImportQuantumPixelsFailed:
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to ImportQuantumPixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
break;
}
}
else
{
if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0)
goto ImportQuantumPixelsFailed;
if (z<=1 && /* fix only during a last pass z==0 || z==1 */
(CellType==miINT8 || CellType==miINT16 || CellType==miINT32 || CellType==miINT64))
FixSignedValues(q,MATLAB_HDR.SizeX);
}
if (!SyncAuthenticPixels(image,exception))
{
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),
" MAT failed to sync image pixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1));
goto ExitLoop;
}
}
} while(z-- >= 2);
quantum_info=DestroyQuantumInfo(quantum_info);
ExitLoop:
/* Read complex part of numbers here */
if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
{ /* Find Min and Max Values for complex parts of floats */
CellType = ReadBlobXXXLong(image2); /* Additional object type */
i = ReadBlobXXXLong(image2); /* size of a complex part - toss away*/
if (CellType==miDOUBLE || CellType==miSINGLE)
{
CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &MinVal, &MaxVal);
}
if (CellType==miDOUBLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobDoublesXXX(image2, ldblk, (double *)BImgBuff);
InsertComplexDoubleRow((double *)BImgBuff, i, image, MinVal, MaxVal);
}
if (CellType==miSINGLE)
for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++)
{
ReadBlobFloatsXXX(image2, ldblk, (float *)BImgBuff);
InsertComplexFloatRow((float *)BImgBuff, i, image, MinVal, MaxVal);
}
}
/* Image is gray when no complex flag is set and 2D Matrix AGAIN!!! */
if ((MATLAB_HDR.DimFlag == 8) &&
((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0))
image->type=GrayscaleType;
if (image->depth == 1)
image->type=BilevelType;
if(image2==image)
image2 = NULL; /* Remove shadow copy to an image before rotation. */
/* Rotate image. */
rotated_image = RotateImage(image, 90.0, exception);
if (rotated_image != (Image *) NULL)
{
/* Remove page offsets added by RotateImage */
rotated_image->page.x=0;
rotated_image->page.y=0;
blob = rotated_image->blob;
rotated_image->blob = image->blob;
rotated_image->colors = image->colors;
image->blob = blob;
AppendImageToList(&image,rotated_image);
DeleteImageFromList(&image);
}
done_reading:
if(image2!=NULL)
if(image2!=image)
{
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
}
}
/* Allocate next image structure. */
AcquireNextImage(image_info,image);
if (image->next == (Image *) NULL) break;
image=SyncNextImageInList(image);
image->columns=image->rows=0;
image->colors=0;
/* row scan buffer is no longer needed */
RelinquishMagickMemory(BImgBuff);
BImgBuff = NULL;
if(--Frames>0)
{
z = z2;
if(image2==NULL) image2 = image;
goto NEXT_FRAME;
}
if(image2!=NULL)
if(image2!=image) /* Does shadow temporary decompressed image exist? */
{
/* CloseBlob(image2); */
DeleteImageFromList(&image2);
if(clone_info)
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) unlink(clone_info->filename);
}
}
}
}
clone_info=DestroyImageInfo(clone_info);
RelinquishMagickMemory(BImgBuff);
CloseBlob(image);
{
Image *p;
ssize_t scene=0;
/*
Rewind list, removing any empty images while rewinding.
*/
p=image;
image=NULL;
while (p != (Image *) NULL)
{
Image *tmp=p;
if ((p->rows == 0) || (p->columns == 0)) {
p=p->previous;
DeleteImageFromList(&tmp);
} else {
image=p;
p=p->previous;
}
}
/*
Fix scene numbers
*/
for (p=image; p != (Image *) NULL; p=p->next)
p->scene=scene++;
}
if(clone_info != NULL) /* cleanup garbage file from compression */
{
if(clone_info->file)
{
fclose(clone_info->file);
clone_info->file = NULL;
(void) remove_utf8(clone_info->filename);
}
DestroyImageInfo(clone_info);
clone_info = NULL;
}
if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),"return");
if(image==NULL)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
return (image);
}
| 168,535 |
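The only functional change in the fixed ReadMATImage is the added DestroyQuantumInfo call after the scanline loop: per-object decode state is released as soon as the loop that needs it finishes, instead of leaking across objects. A small C sketch of that acquire/use/release discipline, with illustrative names:

#include <stdlib.h>

struct quantum { double minimum, maximum; };

struct quantum *quantum_acquire(void)
{
    return calloc(1, sizeof(struct quantum));
}

struct quantum *quantum_destroy(struct quantum *q)
{
    free(q);
    return NULL;                /* callers re-assign, so no dangling ptr */
}

int decode_object(void)
{
    struct quantum *q = quantum_acquire();
    if (!q)
        return -1;
    /* ... import scanlines using q->minimum / q->maximum ... */
    q = quantum_destroy(q);     /* release before parsing the next object */
    return 0;
}

Returning NULL from the destroy helper and assigning it back is the same convention the record itself uses (quantum_info=DestroyQuantumInfo(quantum_info);), and it keeps a stale pointer from surviving into the next loop iteration.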
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void RenderWidgetHostViewAura::CopyFromCompositingSurface(
const gfx::Rect& src_subrect,
const gfx::Size& dst_size,
const base::Callback<void(bool)>& callback,
skia::PlatformBitmap* output) {
base::ScopedClosureRunner scoped_callback_runner(base::Bind(callback, false));
std::map<uint64, scoped_refptr<ui::Texture> >::iterator it =
image_transport_clients_.find(current_surface_);
if (it == image_transport_clients_.end())
return;
ui::Texture* container = it->second;
DCHECK(container);
gfx::Size dst_size_in_pixel = ConvertSizeToPixel(this, dst_size);
if (!output->Allocate(
dst_size_in_pixel.width(), dst_size_in_pixel.height(), true))
return;
ImageTransportFactory* factory = ImageTransportFactory::GetInstance();
GLHelper* gl_helper = factory->GetGLHelper();
if (!gl_helper)
return;
unsigned char* addr = static_cast<unsigned char*>(
output->GetBitmap().getPixels());
scoped_callback_runner.Release();
base::Callback<void(bool)> wrapper_callback = base::Bind(
&RenderWidgetHostViewAura::CopyFromCompositingSurfaceFinished,
AsWeakPtr(),
callback);
++pending_thumbnail_tasks_;
gfx::Rect src_subrect_in_gl = src_subrect;
src_subrect_in_gl.set_y(GetViewBounds().height() - src_subrect.bottom());
gfx::Rect src_subrect_in_pixel = ConvertRectToPixel(this, src_subrect_in_gl);
gl_helper->CropScaleReadbackAndCleanTexture(container->PrepareTexture(),
container->size(),
src_subrect_in_pixel,
dst_size_in_pixel,
addr,
wrapper_callback);
}
Commit Message: Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
[email protected]
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | void RenderWidgetHostViewAura::CopyFromCompositingSurface(
const gfx::Rect& src_subrect,
const gfx::Size& dst_size,
const base::Callback<void(bool)>& callback,
skia::PlatformBitmap* output) {
base::ScopedClosureRunner scoped_callback_runner(base::Bind(callback, false));
std::map<uint64, scoped_refptr<ui::Texture> >::iterator it =
image_transport_clients_.find(current_surface_);
if (it == image_transport_clients_.end())
return;
ui::Texture* container = it->second;
DCHECK(container);
gfx::Size dst_size_in_pixel = ConvertSizeToPixel(this, dst_size);
if (!output->Allocate(
dst_size_in_pixel.width(), dst_size_in_pixel.height(), true))
return;
ImageTransportFactory* factory = ImageTransportFactory::GetInstance();
GLHelper* gl_helper = factory->GetGLHelper();
if (!gl_helper)
return;
unsigned char* addr = static_cast<unsigned char*>(
output->GetBitmap().getPixels());
scoped_callback_runner.Release();
// Wrap the callback with an internal handler so that we can inject our
// own completion handlers (where we can try to free the frontbuffer).
base::Callback<void(bool)> wrapper_callback = base::Bind(
&RenderWidgetHostViewAura::CopyFromCompositingSurfaceFinished,
AsWeakPtr(),
callback);
++pending_thumbnail_tasks_;
gfx::Rect src_subrect_in_gl = src_subrect;
src_subrect_in_gl.set_y(GetViewBounds().height() - src_subrect.bottom());
gfx::Rect src_subrect_in_pixel = ConvertRectToPixel(this, src_subrect_in_gl);
gl_helper->CropScaleReadbackAndCleanTexture(container->PrepareTexture(),
container->size(),
src_subrect_in_pixel,
dst_size_in_pixel,
addr,
wrapper_callback);
}
| 171,377 |
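The ownership rules in the commit message above (a texture belongs to exactly one of UI parent, mailbox, or transport surface at any instant) can be made explicit in code. A toy C sketch of the state machine, not Chromium's API:

enum owner { OWNER_PRODUCER, OWNER_MAILBOX, OWNER_CONSUMER };

struct texture { int id; enum owner owner; };

int produce(struct texture *t)          /* producer hands off to mailbox */
{
    if (t->owner != OWNER_PRODUCER)
        return -1;                      /* double-produce is a bug */
    t->owner = OWNER_MAILBOX;
    return 0;
}

int consume(struct texture *t)          /* consumer claims from mailbox */
{
    if (t->owner != OWNER_MAILBOX)
        return -1;                      /* nothing published yet */
    t->owner = OWNER_CONSUMER;
    return 0;
}

Making each transition fail loudly when its precondition does not hold is what buys the "clearer ownership semantics" the commit message claims: lost-context and thumbnailing paths can assert who owns what.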
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: chunk_grow(chunk_t *chunk, size_t sz)
{
off_t offset;
size_t memlen_orig = chunk->memlen;
tor_assert(sz > chunk->memlen);
offset = chunk->data - chunk->mem;
chunk = tor_realloc(chunk, CHUNK_ALLOC_SIZE(sz));
chunk->memlen = sz;
chunk->data = chunk->mem + offset;
#ifdef DEBUG_CHUNK_ALLOC
tor_assert(chunk->DBG_alloc == CHUNK_ALLOC_SIZE(memlen_orig));
chunk->DBG_alloc = CHUNK_ALLOC_SIZE(sz);
#endif
total_bytes_allocated_in_chunks +=
CHUNK_ALLOC_SIZE(sz) - CHUNK_ALLOC_SIZE(memlen_orig);
return chunk;
}
Commit Message: Add a one-word sentinel value of 0x0 at the end of each buf_t chunk
This helps protect against bugs where any part of a buf_t's memory
is passed to a function that expects a NUL-terminated input.
It also closes TROVE-2016-10-001 (aka bug 20384).
CWE ID: CWE-119 | chunk_grow(chunk_t *chunk, size_t sz)
{
off_t offset;
const size_t memlen_orig = chunk->memlen;
const size_t orig_alloc = CHUNK_ALLOC_SIZE(memlen_orig);
const size_t new_alloc = CHUNK_ALLOC_SIZE(sz);
tor_assert(sz > chunk->memlen);
offset = chunk->data - chunk->mem;
chunk = tor_realloc(chunk, new_alloc);
chunk->memlen = sz;
chunk->data = chunk->mem + offset;
#ifdef DEBUG_CHUNK_ALLOC
tor_assert(chunk->DBG_alloc == orig_alloc);
chunk->DBG_alloc = new_alloc;
#endif
total_bytes_allocated_in_chunks += new_alloc - orig_alloc;
CHUNK_SET_SENTINEL(chunk, new_alloc);
return chunk;
}
| 168,757 |
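The sentinel mentioned in the commit message is a cheap canary: one zero word past the usable area, re-checked later, which trips whenever code writes (or expects a NUL terminator) beyond memlen. A simplified C sketch of the idea, not tor's actual buf_t layout:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct chunk { size_t memlen; char mem[]; };

size_t chunk_alloc_size(size_t len)
{
    return sizeof(struct chunk) + len + sizeof(uint32_t);
}

void chunk_set_sentinel(struct chunk *c)
{
    uint32_t zero = 0;
    memcpy(c->mem + c->memlen, &zero, sizeof(zero));
}

void chunk_check_sentinel(const struct chunk *c)
{
    uint32_t v;
    memcpy(&v, c->mem + c->memlen, sizeof(v));
    assert(v == 0);             /* fires if anything wrote past memlen */
}

struct chunk *chunk_new(size_t len)
{
    struct chunk *c = malloc(chunk_alloc_size(len));
    if (!c)
        return NULL;
    c->memlen = len;
    chunk_set_sentinel(c);
    return c;
}

chunk_grow in the record performs the same bookkeeping on realloc: recompute the allocation size, then re-plant the sentinel at the new end (CHUNK_SET_SENTINEL).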
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void DownloadItemImpl::UpdateProgress(int64 bytes_so_far,
const std::string& hash_state) {
hash_state_ = hash_state;
received_bytes_ = bytes_so_far;
if (received_bytes_ > total_bytes_)
total_bytes_ = 0;
if (bound_net_log_.IsLoggingAllEvents()) {
bound_net_log_.AddEvent(
net::NetLog::TYPE_DOWNLOAD_ITEM_UPDATED,
net::NetLog::Int64Callback("bytes_so_far", received_bytes_));
}
}
Commit Message: Refactors to simplify rename pathway in DownloadFileManager.
This is https://chromiumcodereview.appspot.com/10668004 / r144817 (reverted
due to CrOS failure) with the completion logic moved to after the
auto-opening. The tests that test the auto-opening (for web store install)
were waiting for download completion to check install, and hence were
failing when completion was moved earlier.
Doing this right would probably require another state (OPENED).
BUG=123998
BUG=134930
[email protected]
Review URL: https://chromiumcodereview.appspot.com/10701040
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@145157 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | void DownloadItemImpl::UpdateProgress(int64 bytes_so_far,
int64 bytes_per_sec,
const std::string& hash_state) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
if (!IsInProgress()) {
// Ignore if we're no longer in-progress. This can happen if we race a
// Cancel on the UI thread with an update on the FILE thread.
//
// TODO(rdsmith): Arguably we should let this go through, as this means
// the download really did get further than we know before it was
// cancelled. But the gain isn't very large, and the code is more
// fragile if it has to support in progress updates in a non-in-progress
// state. This issue should be readdressed when we revamp performance
// reporting.
return;
}
bytes_per_sec_ = bytes_per_sec;
hash_state_ = hash_state;
received_bytes_ = bytes_so_far;
if (received_bytes_ > total_bytes_)
total_bytes_ = 0;
if (bound_net_log_.IsLoggingAllEvents()) {
bound_net_log_.AddEvent(
net::NetLog::TYPE_DOWNLOAD_ITEM_UPDATED,
net::NetLog::Int64Callback("bytes_so_far", received_bytes_));
}
}
| 170,885 |
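Beyond the added bytes_per_sec parameter, the substantive fix is the early-return guard: a progress update that races in from the FILE thread after a Cancel on the UI thread is now dropped instead of mutating a finished download. A toy single-threaded C sketch of the guard itself:

enum dl_state { DL_IN_PROGRESS, DL_CANCELLED, DL_COMPLETE };

struct download { enum dl_state state; long long received; };

void update_progress(struct download *d, long long bytes_so_far)
{
    if (d->state != DL_IN_PROGRESS)
        return;                 /* a racing Cancel already won; drop it */
    d->received = bytes_so_far;
}

The guard does not remove the race (the messages still interleave); it makes the late update harmless by refusing to touch non-in-progress state.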
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static unsigned int ipv6_defrag(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
int err;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* Previously seen (loopback)? */
if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
return NF_ACCEPT;
#endif
err = nf_ct_frag6_gather(state->net, skb,
nf_ct6_defrag_user(state->hook, skb));
/* queued */
if (err == -EINPROGRESS)
return NF_STOLEN;
return NF_ACCEPT;
}
Commit Message: netfilter: ipv6: nf_defrag: drop mangled skb on ream error
Dmitry Vyukov reported GPF in network stack that Andrey traced down to
negative nh offset in nf_ct_frag6_queue().
Problem is that all network headers before fragment header are pulled.
Normal ipv6 reassembly will drop the skb when errors occur further down
the line.
netfilter doesn't do this, and instead passes the original fragment
along. That was also fine back when netfilter ipv6 defrag worked with
cloned fragments, as the original, pristine fragment was passed on.
So we either have to undo the pull op, or discard such fragments.
Since they're malformed after all (e.g. overlapping fragment) it seems
preferable to just drop them.
Same for temporary errors -- it doesn't make sense to accept (and
perhaps forward!) only some fragments of the same datagram.
Fixes: 029f7f3b8701cc7ac ("netfilter: ipv6: nf_defrag: avoid/free clone operations")
Reported-by: Dmitry Vyukov <[email protected]>
Debugged-by: Andrey Konovalov <[email protected]>
Diagnosed-by: Eric Dumazet <[email protected]>
Signed-off-by: Florian Westphal <[email protected]>
Acked-by: Eric Dumazet <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
CWE ID: CWE-787 | static unsigned int ipv6_defrag(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
int err;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* Previously seen (loopback)? */
if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
return NF_ACCEPT;
#endif
err = nf_ct_frag6_gather(state->net, skb,
nf_ct6_defrag_user(state->hook, skb));
/* queued */
if (err == -EINPROGRESS)
return NF_STOLEN;
return err == 0 ? NF_ACCEPT : NF_DROP;
}
| 166,851 |
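The whole fix above is one line of verdict mapping: a reassembly error must translate to NF_DROP, because the fragment's headers have already been pulled and it must not travel on. A hedged C sketch with toy constants standing in for the netfilter verdicts and errno values:

#define NF_DROP   0
#define NF_ACCEPT 1
#define NF_STOLEN 2

#define ERR_EINPROGRESS (-115)  /* -EINPROGRESS on Linux */

int defrag_verdict(int gather_err)
{
    if (gather_err == ERR_EINPROGRESS)
        return NF_STOLEN;       /* fragment queued; skb now owned by us */
    return gather_err == 0 ? NF_ACCEPT : NF_DROP;   /* was: always ACCEPT */
}

Mapping every non-zero, non-queued result to NF_DROP is what closes the negative-nh-offset crash: malformed fragments die here rather than reaching code that trusts the header layout.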
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: long Cluster::GetLast(const BlockEntry*& pLast) const
{
for (;;)
{
long long pos;
long len;
const long status = Parse(pos, len);
if (status < 0) //error
{
pLast = NULL;
return status;
}
if (status > 0) //no new block
break;
}
if (m_entries_count <= 0)
{
pLast = NULL;
return 0;
}
assert(m_entries);
const long idx = m_entries_count - 1;
pLast = m_entries[idx];
assert(pLast);
return 0;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean-ups upstream.
But it was ensured that there were no breaking changes when the style
clean-ups were done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | long Cluster::GetLast(const BlockEntry*& pLast) const
if (m_entries_count <= 0) {
pLast = NULL;
return 0;
}
assert(m_entries);
const long idx = m_entries_count - 1;
pLast = m_entries[idx];
assert(pLast);
return 0;
}
| 174,338 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int readContigStripsIntoBuffer (TIFF* in, uint8* buf)
{
uint8* bufp = buf;
int32 bytes_read = 0;
uint32 strip, nstrips = TIFFNumberOfStrips(in);
uint32 stripsize = TIFFStripSize(in);
uint32 rows = 0;
uint32 rps = TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps);
tsize_t scanline_size = TIFFScanlineSize(in);
if (scanline_size == 0) {
TIFFError("", "TIFF scanline size is zero!");
return 0;
}
for (strip = 0; strip < nstrips; strip++) {
bytes_read = TIFFReadEncodedStrip (in, strip, bufp, -1);
rows = bytes_read / scanline_size;
if ((strip < (nstrips - 1)) && (bytes_read != (int32)stripsize))
TIFFError("", "Strip %d: read %lu bytes, strip size %lu",
(int)strip + 1, (unsigned long) bytes_read,
(unsigned long)stripsize);
if (bytes_read < 0 && !ignore) {
TIFFError("", "Error reading strip %lu after %lu rows",
(unsigned long) strip, (unsigned long)rows);
return 0;
}
bufp += bytes_read;
}
return 1;
} /* end readContigStripsIntoBuffer */
Commit Message: * tools/tiffcrop.c: fix readContigStripsIntoBuffer() in -i (ignore) mode so
that the output buffer is correctly incremented to avoid writes outside its bounds.
Reported by Agostino Sarubbo.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2620
CWE ID: CWE-119 | static int readContigStripsIntoBuffer (TIFF* in, uint8* buf)
{
uint8* bufp = buf;
int32 bytes_read = 0;
uint32 strip, nstrips = TIFFNumberOfStrips(in);
uint32 stripsize = TIFFStripSize(in);
uint32 rows = 0;
uint32 rps = TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps);
tsize_t scanline_size = TIFFScanlineSize(in);
if (scanline_size == 0) {
TIFFError("", "TIFF scanline size is zero!");
return 0;
}
for (strip = 0; strip < nstrips; strip++) {
bytes_read = TIFFReadEncodedStrip (in, strip, bufp, -1);
rows = bytes_read / scanline_size;
if ((strip < (nstrips - 1)) && (bytes_read != (int32)stripsize))
TIFFError("", "Strip %d: read %lu bytes, strip size %lu",
(int)strip + 1, (unsigned long) bytes_read,
(unsigned long)stripsize);
if (bytes_read < 0 && !ignore) {
TIFFError("", "Error reading strip %lu after %lu rows",
(unsigned long) strip, (unsigned long)rows);
return 0;
}
bufp += stripsize;
}
return 1;
} /* end readContigStripsIntoBuffer */
| 168,461 |
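The tiffcrop fix is a pointer-arithmetic rule: in ignore-errors mode, advance the destination by the nominal strip size, never by the bytes actually read, or a short read shifts every later strip, and a negative bytes_read even walks the pointer backwards out of bounds. A C sketch of the fixed-stride advance, with read_fn as a hypothetical reader:

#include <stddef.h>
#include <stdint.h>

typedef long (*read_fn)(size_t strip, uint8_t *buf, size_t want);

int copy_strips(uint8_t *dst, size_t nstrips, size_t stripsize, read_fn rd)
{
    uint8_t *out = dst;
    for (size_t s = 0; s < nstrips; s++) {
        long got = rd(s, out, stripsize);   /* may be short or negative */
        (void)got;                          /* ignore-errors mode */
        out += stripsize;                   /* fixed stride, not `got` */
    }
    return 1;
}

dst must of course be sized as nstrips * stripsize; with that invariant the pointer can never leave the buffer regardless of what the reader returns.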
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: GURL DecorateFrontendURL(const GURL& base_url) {
std::string frontend_url = base_url.spec();
std::string url_string(
frontend_url +
((frontend_url.find("?") == std::string::npos) ? "?" : "&") +
"dockSide=undocked"); // TODO(dgozman): remove this support in M38.
base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
if (command_line->HasSwitch(switches::kEnableDevToolsExperiments))
url_string += "&experiments=true";
if (command_line->HasSwitch(switches::kDevToolsFlags)) {
std::string flags = command_line->GetSwitchValueASCII(
switches::kDevToolsFlags);
flags = net::EscapeQueryParamValue(flags, false);
url_string += "&flags=" + flags;
}
#if defined(DEBUG_DEVTOOLS)
url_string += "&debugFrontend=true";
#endif // defined(DEBUG_DEVTOOLS)
return GURL(url_string);
}
Commit Message: [DevTools] Move sanitize url to devtools_ui.cc.
Compatibility script is not reliable enough.
BUG=653134
Review-Url: https://codereview.chromium.org/2403633002
Cr-Commit-Position: refs/heads/master@{#425814}
CWE ID: CWE-200 | GURL DecorateFrontendURL(const GURL& base_url) {
std::string frontend_url = base_url.spec();
std::string url_string(
frontend_url +
((frontend_url.find("?") == std::string::npos) ? "?" : "&") +
"dockSide=undocked"); // TODO(dgozman): remove this support in M38.
base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
if (command_line->HasSwitch(switches::kEnableDevToolsExperiments))
url_string += "&experiments=true";
if (command_line->HasSwitch(switches::kDevToolsFlags)) {
url_string += "&" + command_line->GetSwitchValueASCII(
switches::kDevToolsFlags);
}
#if defined(DEBUG_DEVTOOLS)
url_string += "&debugFrontend=true";
#endif // defined(DEBUG_DEVTOOLS)
return GURL(url_string);
}
| 172,508 |
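The old code escaped the flags inline; the fix deletes that because sanitization now happens in devtools_ui.cc. The escaping step itself is still worth seeing. A minimal C sketch of percent-encoding a value before splicing it into a query string (demo character set only, not net::EscapeQueryParamValue):

#include <stdio.h>
#include <string.h>

void append_escaped(char *dst, size_t cap, const char *val)
{
    size_t n = strlen(dst);
    for (; *val && n + 4 < cap; val++) {
        if (strchr("&=#? %", *val))
            n += (size_t)snprintf(dst + n, cap - n, "%%%02X",
                                  (unsigned char)*val);
        else
            dst[n++] = *val;
    }
    dst[n] = '\0';
}

Whichever layer does this, the invariant is the same: attacker-influenced text must be encoded before it is concatenated into a URL the frontend will trust.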
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: std::string GetDMToken() {
std::string dm_token = *GetTestingDMToken();
#if !defined(OS_CHROMEOS)
if (dm_token.empty() &&
policy::ChromeBrowserCloudManagementController::IsEnabled()) {
dm_token = policy::BrowserDMTokenStorage::Get()->RetrieveDMToken();
}
#endif
return dm_token;
}
Commit Message: Migrate download_protection code to new DM token class.
Migrates RetrieveDMToken calls to use the new BrowserDMToken class.
Bug: 1020296
Change-Id: Icef580e243430d73b6c1c42b273a8540277481d9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1904234
Commit-Queue: Dominique Fauteux-Chapleau <[email protected]>
Reviewed-by: Tien Mai <[email protected]>
Reviewed-by: Daniel Rubery <[email protected]>
Cr-Commit-Position: refs/heads/master@{#714196}
CWE ID: CWE-20 | BrowserDMToken GetTestingDMToken() {
const char* dm_token = *GetTestingDMTokenStorage();
return dm_token && dm_token[0] ? BrowserDMToken::CreateValidToken(dm_token)
: BrowserDMToken::CreateEmptyToken();
}
policy::BrowserDMTokenStorage::BrowserDMToken GetDMToken() {
auto dm_token = GetTestingDMToken();
#if !defined(OS_CHROMEOS)
if (dm_token.is_empty() &&
policy::ChromeBrowserCloudManagementController::IsEnabled()) {
dm_token = policy::BrowserDMTokenStorage::Get()->RetrieveBrowserDMToken();
}
#endif
return dm_token;
}
| 172,353 |
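The migration above replaces a bare std::string token with a type that carries its own validity, so empty-vs-valid is answered by the type rather than by scattered string checks. A C approximation of that pattern (struct dm_token is illustrative; the real class is policy::BrowserDMTokenStorage::BrowserDMToken):

#include <string.h>

enum token_status { TOKEN_EMPTY, TOKEN_VALID };

struct dm_token {
    enum token_status status;
    char value[64];
};

struct dm_token token_empty(void)
{
    struct dm_token t = { TOKEN_EMPTY, "" };
    return t;
}

struct dm_token token_valid(const char *v)
{
    struct dm_token t = { TOKEN_VALID, "" };
    strncpy(t.value, v, sizeof(t.value) - 1);   /* stays NUL-terminated */
    return t;
}

int token_is_empty(const struct dm_token *t)
{
    return t->status == TOKEN_EMPTY;
}

Callers then ask token_is_empty(&t) instead of testing a raw string, and an unvalidated token simply cannot be constructed by accident.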
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool WebDriverCommand::Init(Response* const response) {
std::string session_id = GetPathVariable(2);
if (session_id.length() == 0) {
response->SetError(
new Error(kBadRequest, "No session ID specified"));
return false;
}
VLOG(1) << "Fetching session: " << session_id;
session_ = SessionManager::GetInstance()->GetSession(session_id);
if (session_ == NULL) {
response->SetError(
new Error(kSessionNotFound, "Session not found: " + session_id));
return false;
}
scoped_ptr<Error> error(session_->WaitForAllTabsToStopLoading());
if (error.get()) {
LOG(WARNING) << error->ToString();
}
error.reset(session_->SwitchToTopFrameIfCurrentFrameInvalid());
if (error.get()) {
LOG(WARNING) << error->ToString();
}
response->SetField("sessionId", Value::CreateStringValue(session_id));
return true;
}
Commit Message: In chromedriver, add /log url to get the contents of the chromedriver log
remotely. Also add a 'chrome.verbose' boolean startup option.
Remove usage of VLOG(1) in chromedriver. We do not need as complicated
logging as in Chrome.
BUG=85241
TEST=none
Review URL: http://codereview.chromium.org/7104085
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@88591 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | bool WebDriverCommand::Init(Response* const response) {
std::string session_id = GetPathVariable(2);
if (session_id.length() == 0) {
response->SetError(
new Error(kBadRequest, "No session ID specified"));
return false;
}
session_ = SessionManager::GetInstance()->GetSession(session_id);
if (session_ == NULL) {
response->SetError(
new Error(kSessionNotFound, "Session not found: " + session_id));
return false;
}
LOG(INFO) << "Waiting for the page to stop loading";
Error* error = session_->WaitForAllTabsToStopLoading();
if (error) {
response->SetError(error);
return false;
}
LOG(INFO) << "Done waiting for the page to stop loading";
error = session_->SwitchToTopFrameIfCurrentFrameInvalid();
if (error) {
response->SetError(error);
return false;
}
response->SetField("sessionId", Value::CreateStringValue(session_id));
return true;
}
| 170,454 |
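Note on the pattern above: the substantive half of this fix is not the logging cleanup but error propagation. The vulnerable version logged the wait failure as a warning and continued; the fixed version returns the error to the caller. A minimal sketch of that shape, using illustrative stand-ins rather than chromedriver's real API:
// Log-and-drop vs. propagate; Error and waitForTabs() are hypothetical.
#include <memory>
#include <string>

struct Error { std::string msg; };
std::unique_ptr<Error> waitForTabs();  // hypothetical dependency

bool initCommand(std::unique_ptr<Error>* out_error) {
  if (auto err = waitForTabs()) {      // was: LOG(WARNING) << ...; continue
    *out_error = std::move(err);
    return false;                      // the caller now sees the failure
  }
  return true;
}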
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: bool KeyMap::Press(const scoped_refptr<WindowProxy>& window,
const ui::KeyboardCode key_code,
const wchar_t& key) {
if (key_code == ui::VKEY_SHIFT) {
shift_ = !shift_;
} else if (key_code == ui::VKEY_CONTROL) {
control_ = !control_;
} else if (key_code == ui::VKEY_MENU) { // ALT
alt_ = !alt_;
} else if (key_code == ui::VKEY_COMMAND) {
command_ = !command_;
}
int modifiers = 0;
if (shift_ || shifted_keys_.find(key) != shifted_keys_.end()) {
modifiers = modifiers | ui::EF_SHIFT_DOWN;
}
if (control_) {
modifiers = modifiers | ui::EF_CONTROL_DOWN;
}
if (alt_) {
modifiers = modifiers | ui::EF_ALT_DOWN;
}
if (command_) {
VLOG(1) << "Pressing command key on linux!!";
modifiers = modifiers | ui::EF_COMMAND_DOWN;
}
window->SimulateOSKeyPress(key_code, modifiers);
return true;
}
Commit Message: In chromedriver, add /log url to get the contents of the chromedriver log
remotely. Also add a 'chrome.verbose' boolean startup option.
Remove usage of VLOG(1) in chromedriver. We do not need as complicated
logging as in Chrome.
BUG=85241
TEST=none
Review URL: http://codereview.chromium.org/7104085
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@88591 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | bool KeyMap::Press(const scoped_refptr<WindowProxy>& window,
const ui::KeyboardCode key_code,
const wchar_t& key) {
if (key_code == ui::VKEY_SHIFT) {
shift_ = !shift_;
} else if (key_code == ui::VKEY_CONTROL) {
control_ = !control_;
} else if (key_code == ui::VKEY_MENU) { // ALT
alt_ = !alt_;
} else if (key_code == ui::VKEY_COMMAND) {
command_ = !command_;
}
int modifiers = 0;
if (shift_ || shifted_keys_.find(key) != shifted_keys_.end()) {
modifiers = modifiers | ui::EF_SHIFT_DOWN;
}
if (control_) {
modifiers = modifiers | ui::EF_CONTROL_DOWN;
}
if (alt_) {
modifiers = modifiers | ui::EF_ALT_DOWN;
}
if (command_) {
LOG(INFO) << "Pressing command key on linux!!";
modifiers = modifiers | ui::EF_COMMAND_DOWN;
}
window->SimulateOSKeyPress(key_code, modifiers);
return true;
}
| 170,459 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: bool FrameSelection::SetSelectionDeprecated(
const SelectionInDOMTree& passed_selection,
const SetSelectionData& options) {
DCHECK(IsAvailable());
passed_selection.AssertValidFor(GetDocument());
SelectionInDOMTree::Builder builder(passed_selection);
if (ShouldAlwaysUseDirectionalSelection(frame_))
builder.SetIsDirectional(true);
SelectionInDOMTree new_selection = builder.Build();
if (granularity_strategy_ && !options.DoNotClearStrategy())
granularity_strategy_->Clear();
granularity_ = options.Granularity();
if (options.ShouldCloseTyping())
TypingCommand::CloseTyping(frame_);
if (options.ShouldClearTypingStyle())
frame_->GetEditor().ClearTypingStyle();
const SelectionInDOMTree old_selection_in_dom_tree =
selection_editor_->GetSelectionInDOMTree();
if (old_selection_in_dom_tree == new_selection)
return false;
selection_editor_->SetSelection(new_selection);
ScheduleVisualUpdateForPaintInvalidationIfNeeded();
const Document& current_document = GetDocument();
frame_->GetEditor().RespondToChangedSelection(
old_selection_in_dom_tree.ComputeStartPosition(),
options.ShouldCloseTyping() ? TypingContinuation::kEnd
: TypingContinuation::kContinue);
DCHECK_EQ(current_document, GetDocument());
return true;
}
Commit Message: Move SelectionTemplate::is_handle_visible_ to FrameSelection
This patch moves |is_handle_visible_| to |FrameSelection| from |SelectionTemplate|
since handle visibility is used only for setting |FrameSelection|, hence it is
a redundant member variable of |SelectionTemplate|.
Bug: 742093
Change-Id: I3add4da3844fb40be34dcb4d4b46b5fa6fed1d7e
Reviewed-on: https://chromium-review.googlesource.com/595389
Commit-Queue: Yoshifumi Inoue <[email protected]>
Reviewed-by: Xiaocheng Hu <[email protected]>
Reviewed-by: Kent Tamura <[email protected]>
Cr-Commit-Position: refs/heads/master@{#491660}
CWE ID: CWE-119 | bool FrameSelection::SetSelectionDeprecated(
const SelectionInDOMTree& passed_selection,
const SetSelectionData& options) {
DCHECK(IsAvailable());
passed_selection.AssertValidFor(GetDocument());
SelectionInDOMTree::Builder builder(passed_selection);
if (ShouldAlwaysUseDirectionalSelection(frame_))
builder.SetIsDirectional(true);
SelectionInDOMTree new_selection = builder.Build();
if (granularity_strategy_ && !options.DoNotClearStrategy())
granularity_strategy_->Clear();
granularity_ = options.Granularity();
if (options.ShouldCloseTyping())
TypingCommand::CloseTyping(frame_);
if (options.ShouldClearTypingStyle())
frame_->GetEditor().ClearTypingStyle();
const SelectionInDOMTree old_selection_in_dom_tree =
selection_editor_->GetSelectionInDOMTree();
const bool is_changed = old_selection_in_dom_tree != new_selection;
const bool should_show_handle = options.ShouldShowHandle();
if (!is_changed && is_handle_visible_ == should_show_handle)
return false;
if (is_changed)
selection_editor_->SetSelection(new_selection);
is_handle_visible_ = should_show_handle;
ScheduleVisualUpdateForPaintInvalidationIfNeeded();
const Document& current_document = GetDocument();
frame_->GetEditor().RespondToChangedSelection(
old_selection_in_dom_tree.ComputeStartPosition(),
options.ShouldCloseTyping() ? TypingContinuation::kEnd
: TypingContinuation::kContinue);
DCHECK_EQ(current_document, GetDocument());
return true;
}
| 171,760 |
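Note on the pattern above: an early-return guard must account for every piece of state the call can mutate. The vulnerable version compared only the old and new selections, so a call that changed nothing but handle visibility was dropped. A hedged sketch of the corrected shape, with illustrative types rather than Blink's:
// The guard checks both the selection and the auxiliary flag.
struct SelectionState {
  int anchor = 0, focus = 0;
  bool operator==(const SelectionState& o) const {
    return anchor == o.anchor && focus == o.focus;
  }
};

bool applySelection(SelectionState& current, bool& handle_visible,
                    const SelectionState& next, bool show_handle) {
  const bool changed = !(current == next);
  if (!changed && handle_visible == show_handle)
    return false;                 // truly nothing to do
  if (changed)
    current = next;
  handle_visible = show_handle;   // previously lost on the early return
  return true;
}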
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: chdlc_print(netdissect_options *ndo, register const u_char *p, u_int length)
{
u_int proto;
const u_char *bp = p;
if (length < CHDLC_HDRLEN)
goto trunc;
ND_TCHECK2(*p, CHDLC_HDRLEN);
proto = EXTRACT_16BITS(&p[2]);
if (ndo->ndo_eflag) {
ND_PRINT((ndo, "%s, ethertype %s (0x%04x), length %u: ",
tok2str(chdlc_cast_values, "0x%02x", p[0]),
tok2str(ethertype_values, "Unknown", proto),
proto,
length));
}
length -= CHDLC_HDRLEN;
p += CHDLC_HDRLEN;
switch (proto) {
case ETHERTYPE_IP:
ip_print(ndo, p, length);
break;
case ETHERTYPE_IPV6:
ip6_print(ndo, p, length);
break;
case CHDLC_TYPE_SLARP:
chdlc_slarp_print(ndo, p, length);
break;
#if 0
case CHDLC_TYPE_CDP:
chdlc_cdp_print(p, length);
break;
#endif
case ETHERTYPE_MPLS:
case ETHERTYPE_MPLS_MULTI:
mpls_print(ndo, p, length);
break;
case ETHERTYPE_ISO:
/* is the fudge byte set ? lets verify by spotting ISO headers */
if (length < 2)
goto trunc;
ND_TCHECK_16BITS(p);
if (*(p+1) == 0x81 ||
*(p+1) == 0x82 ||
*(p+1) == 0x83)
isoclns_print(ndo, p + 1, length - 1, ndo->ndo_snapend - p - 1);
else
isoclns_print(ndo, p, length, ndo->ndo_snapend - p);
break;
default:
if (!ndo->ndo_eflag)
ND_PRINT((ndo, "unknown CHDLC protocol (0x%04x)", proto));
break;
}
return (CHDLC_HDRLEN);
trunc:
ND_PRINT((ndo, "[|chdlc]"));
return ndo->ndo_snapend - bp;
}
Commit Message: CVE-2017-12897/ISO CLNS: Use ND_TTEST() for the bounds checks in isoclns_print().
This fixes a buffer over-read discovered by Kamil Frankowicz.
Don't pass the remaining caplen - that's too hard to get right, and we
were getting it wrong in at least one case; just use ND_TTEST().
Add a test using the capture file supplied by the reporter(s).
CWE ID: CWE-125 | chdlc_print(netdissect_options *ndo, register const u_char *p, u_int length)
{
u_int proto;
const u_char *bp = p;
if (length < CHDLC_HDRLEN)
goto trunc;
ND_TCHECK2(*p, CHDLC_HDRLEN);
proto = EXTRACT_16BITS(&p[2]);
if (ndo->ndo_eflag) {
ND_PRINT((ndo, "%s, ethertype %s (0x%04x), length %u: ",
tok2str(chdlc_cast_values, "0x%02x", p[0]),
tok2str(ethertype_values, "Unknown", proto),
proto,
length));
}
length -= CHDLC_HDRLEN;
p += CHDLC_HDRLEN;
switch (proto) {
case ETHERTYPE_IP:
ip_print(ndo, p, length);
break;
case ETHERTYPE_IPV6:
ip6_print(ndo, p, length);
break;
case CHDLC_TYPE_SLARP:
chdlc_slarp_print(ndo, p, length);
break;
#if 0
case CHDLC_TYPE_CDP:
chdlc_cdp_print(p, length);
break;
#endif
case ETHERTYPE_MPLS:
case ETHERTYPE_MPLS_MULTI:
mpls_print(ndo, p, length);
break;
case ETHERTYPE_ISO:
/* is the fudge byte set ? lets verify by spotting ISO headers */
if (length < 2)
goto trunc;
ND_TCHECK_16BITS(p);
if (*(p+1) == 0x81 ||
*(p+1) == 0x82 ||
*(p+1) == 0x83)
isoclns_print(ndo, p + 1, length - 1);
else
isoclns_print(ndo, p, length);
break;
default:
if (!ndo->ndo_eflag)
ND_PRINT((ndo, "unknown CHDLC protocol (0x%04x)", proto));
break;
}
return (CHDLC_HDRLEN);
trunc:
ND_PRINT((ndo, "[|chdlc]"));
return ndo->ndo_snapend - bp;
}
| 167,943 |
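Note on the pattern above: the commit drops caller-threaded "remaining caplen" arithmetic in favor of checking reads against the end of the captured data (tcpdump's ND_TTEST family). A minimal sketch of that idiom, with illustrative names rather than tcpdump's actual macros:
#include <stddef.h>
#include <stdint.h>

struct dissect_ctx {
  const uint8_t *snapend;  /* one past the last captured byte */
};

/* Nonzero if [p, p+len) lies entirely within the capture. */
static int bytes_available(const struct dissect_ctx *ctx,
                           const uint8_t *p, size_t len) {
  if (p > ctx->snapend)
    return 0;
  return (size_t)(ctx->snapend - p) >= len;
}

/* usage: if (!bytes_available(ctx, p, 2)) goto trunc; v = (p[0] << 8) | p[1]; */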
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: EncodedJSValue JSC_HOST_CALL JSWorkerConstructor::constructJSWorker(ExecState* exec)
{
JSWorkerConstructor* jsConstructor = jsCast<JSWorkerConstructor*>(exec->callee());
if (!exec->argumentCount())
return throwVMError(exec, createTypeError(exec, "Not enough arguments"));
UString scriptURL = exec->argument(0).toString(exec)->value(exec);
if (exec->hadException())
return JSValue::encode(JSValue());
DOMWindow* window = asJSDOMWindow(exec->lexicalGlobalObject())->impl();
ExceptionCode ec = 0;
RefPtr<Worker> worker = Worker::create(window->document(), ustringToString(scriptURL), ec);
if (ec) {
setDOMException(exec, ec);
return JSValue::encode(JSValue());
}
return JSValue::encode(asObject(toJS(exec, jsConstructor->globalObject(), worker.release())));
}
Commit Message: [JSC] Implement a helper method createNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=85102
Reviewed by Geoffrey Garen.
In bug 84787, kbr@ requested to avoid hard-coding
createTypeError(exec, "Not enough arguments") here and there.
This patch implements createNotEnoughArgumentsError(exec)
and uses it in JSC bindings.
c.f. a corresponding bug for V8 bindings is bug 85097.
Source/JavaScriptCore:
* runtime/Error.cpp:
(JSC::createNotEnoughArgumentsError):
(JSC):
* runtime/Error.h:
(JSC):
Source/WebCore:
Test: bindings/scripts/test/TestObj.idl
* bindings/scripts/CodeGeneratorJS.pm: Modified as described above.
(GenerateArgumentsCountCheck):
* bindings/js/JSDataViewCustom.cpp: Ditto.
(WebCore::getDataViewMember):
(WebCore::setDataViewMember):
* bindings/js/JSDeprecatedPeerConnectionCustom.cpp:
(WebCore::JSDeprecatedPeerConnectionConstructor::constructJSDeprecatedPeerConnection):
* bindings/js/JSDirectoryEntryCustom.cpp:
(WebCore::JSDirectoryEntry::getFile):
(WebCore::JSDirectoryEntry::getDirectory):
* bindings/js/JSSharedWorkerCustom.cpp:
(WebCore::JSSharedWorkerConstructor::constructJSSharedWorker):
* bindings/js/JSWebKitMutationObserverCustom.cpp:
(WebCore::JSWebKitMutationObserverConstructor::constructJSWebKitMutationObserver):
(WebCore::JSWebKitMutationObserver::observe):
* bindings/js/JSWorkerCustom.cpp:
(WebCore::JSWorkerConstructor::constructJSWorker):
* bindings/scripts/test/JS/JSFloat64Array.cpp: Updated run-bindings-tests.
(WebCore::jsFloat64ArrayPrototypeFunctionFoo):
* bindings/scripts/test/JS/JSTestActiveDOMObject.cpp:
(WebCore::jsTestActiveDOMObjectPrototypeFunctionExcitingFunction):
(WebCore::jsTestActiveDOMObjectPrototypeFunctionPostMessage):
* bindings/scripts/test/JS/JSTestCustomNamedGetter.cpp:
(WebCore::jsTestCustomNamedGetterPrototypeFunctionAnotherFunction):
* bindings/scripts/test/JS/JSTestEventTarget.cpp:
(WebCore::jsTestEventTargetPrototypeFunctionItem):
(WebCore::jsTestEventTargetPrototypeFunctionAddEventListener):
(WebCore::jsTestEventTargetPrototypeFunctionRemoveEventListener):
(WebCore::jsTestEventTargetPrototypeFunctionDispatchEvent):
* bindings/scripts/test/JS/JSTestInterface.cpp:
(WebCore::JSTestInterfaceConstructor::constructJSTestInterface):
(WebCore::jsTestInterfacePrototypeFunctionSupplementalMethod2):
* bindings/scripts/test/JS/JSTestMediaQueryListListener.cpp:
(WebCore::jsTestMediaQueryListListenerPrototypeFunctionMethod):
* bindings/scripts/test/JS/JSTestNamedConstructor.cpp:
(WebCore::JSTestNamedConstructorNamedConstructor::constructJSTestNamedConstructor):
* bindings/scripts/test/JS/JSTestObj.cpp:
(WebCore::JSTestObjConstructor::constructJSTestObj):
(WebCore::jsTestObjPrototypeFunctionVoidMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionIntMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionObjMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionMethodWithSequenceArg):
(WebCore::jsTestObjPrototypeFunctionMethodReturningSequence):
(WebCore::jsTestObjPrototypeFunctionMethodThatRequiresAllArgsAndThrows):
(WebCore::jsTestObjPrototypeFunctionSerializedValue):
(WebCore::jsTestObjPrototypeFunctionIdbKey):
(WebCore::jsTestObjPrototypeFunctionOptionsObject):
(WebCore::jsTestObjPrototypeFunctionAddEventListener):
(WebCore::jsTestObjPrototypeFunctionRemoveEventListener):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndOptionalArg):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndTwoOptionalArgs):
(WebCore::jsTestObjPrototypeFunctionMethodWithCallbackArg):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonCallbackArgAndCallbackArg):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod1):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod2):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod3):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod4):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod5):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod6):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod7):
(WebCore::jsTestObjConstructorFunctionClassMethod2):
(WebCore::jsTestObjConstructorFunctionOverloadedMethod11):
(WebCore::jsTestObjConstructorFunctionOverloadedMethod12):
(WebCore::jsTestObjPrototypeFunctionMethodWithUnsignedLongArray):
(WebCore::jsTestObjPrototypeFunctionConvert1):
(WebCore::jsTestObjPrototypeFunctionConvert2):
(WebCore::jsTestObjPrototypeFunctionConvert3):
(WebCore::jsTestObjPrototypeFunctionConvert4):
(WebCore::jsTestObjPrototypeFunctionConvert5):
(WebCore::jsTestObjPrototypeFunctionStrictFunction):
* bindings/scripts/test/JS/JSTestSerializedScriptValueInterface.cpp:
(WebCore::JSTestSerializedScriptValueInterfaceConstructor::constructJSTestSerializedScriptValueInterface):
(WebCore::jsTestSerializedScriptValueInterfacePrototypeFunctionAcceptTransferList):
git-svn-id: svn://svn.chromium.org/blink/trunk@115536 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-20 | EncodedJSValue JSC_HOST_CALL JSWorkerConstructor::constructJSWorker(ExecState* exec)
{
JSWorkerConstructor* jsConstructor = jsCast<JSWorkerConstructor*>(exec->callee());
if (!exec->argumentCount())
return throwVMError(exec, createNotEnoughArgumentsError(exec));
UString scriptURL = exec->argument(0).toString(exec)->value(exec);
if (exec->hadException())
return JSValue::encode(JSValue());
DOMWindow* window = asJSDOMWindow(exec->lexicalGlobalObject())->impl();
ExceptionCode ec = 0;
RefPtr<Worker> worker = Worker::create(window->document(), ustringToString(scriptURL), ec);
if (ec) {
setDOMException(exec, ec);
return JSValue::encode(JSValue());
}
return JSValue::encode(asObject(toJS(exec, jsConstructor->globalObject(), worker.release())));
}
| 170,565 |
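Note on the pattern above: the refactor centralizes the "Not enough arguments" error so every binding throws a consistent error object instead of hand-building the same TypeError at each call site. A sketch of the helper shape in plain C++, not JavaScriptCore's actual API:
#include <cstddef>
#include <stdexcept>

struct NotEnoughArgumentsError : std::invalid_argument {
  NotEnoughArgumentsError() : std::invalid_argument("Not enough arguments") {}
};

// Single definition, many callers; the message can no longer drift.
inline void requireArgumentCount(std::size_t actual, std::size_t required) {
  if (actual < required)
    throw NotEnoughArgumentsError();
}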
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: int OBJ_obj2txt(char *buf, int buf_len, const ASN1_OBJECT *a, int no_name)
{
int i,n=0,len,nid, first, use_bn;
BIGNUM *bl;
unsigned long l;
const unsigned char *p;
const char *s;
char tbuf[DECIMAL_SIZE(i)+DECIMAL_SIZE(l)+2];
if ((a == NULL) || (a->data == NULL)) {
buf[0]='\0';
return(0);
}
if (!no_name && (nid=OBJ_obj2nid(a)) != NID_undef)
{
s=OBJ_nid2ln(nid);
if (s == NULL)
s=OBJ_nid2sn(nid);
if (s)
{
if (buf)
BUF_strlcpy(buf,s,buf_len);
n=strlen(s);
return n;
}
}
len=a->length;
p=a->data;
first = 1;
bl = NULL;
while (len > 0)
{
l=0;
use_bn = 0;
for (;;)
{
unsigned char c = *p++;
len--;
if ((len == 0) && (c & 0x80))
goto err;
if (use_bn)
{
if (!BN_add_word(bl, c & 0x7f))
goto err;
}
else
l |= c & 0x7f;
if (!(c & 0x80))
break;
if (!use_bn && (l > (ULONG_MAX >> 7L)))
{
if (!bl && !(bl = BN_new()))
goto err;
if (!BN_set_word(bl, l))
goto err;
use_bn = 1;
}
if (use_bn)
{
if (!BN_lshift(bl, bl, 7))
goto err;
}
else
l<<=7L;
}
if (first)
{
first = 0;
if (l >= 80)
{
i = 2;
if (use_bn)
{
if (!BN_sub_word(bl, 80))
goto err;
}
else
l -= 80;
}
else
{
i=(int)(l/40);
l-=(long)(i*40);
}
if (buf && (buf_len > 0))
{
*buf++ = i + '0';
buf_len--;
}
n++;
}
if (use_bn)
{
char *bndec;
bndec = BN_bn2dec(bl);
if (!bndec)
goto err;
i = strlen(bndec);
if (buf)
{
if (buf_len > 0)
{
*buf++ = '.';
buf_len--;
}
BUF_strlcpy(buf,bndec,buf_len);
if (i > buf_len)
{
buf += buf_len;
buf_len = 0;
}
else
{
buf+=i;
buf_len-=i;
}
}
n++;
n += i;
OPENSSL_free(bndec);
}
else
{
BIO_snprintf(tbuf,sizeof tbuf,".%lu",l);
i=strlen(tbuf);
if (buf && (buf_len > 0))
{
BUF_strlcpy(buf,tbuf,buf_len);
if (i > buf_len)
{
buf += buf_len;
buf_len = 0;
}
else
{
buf+=i;
buf_len-=i;
}
}
n+=i;
l=0;
}
}
if (bl)
BN_free(bl);
return n;
err:
if (bl)
BN_free(bl);
return -1;
}
Commit Message:
CWE ID: CWE-200 | int OBJ_obj2txt(char *buf, int buf_len, const ASN1_OBJECT *a, int no_name)
{
int i,n=0,len,nid, first, use_bn;
BIGNUM *bl;
unsigned long l;
const unsigned char *p;
const char *s;
char tbuf[DECIMAL_SIZE(i)+DECIMAL_SIZE(l)+2];
/* Ensure that, at every state, |buf| is NUL-terminated. */
if (buf && buf_len > 0)
buf[0] = '\0';
if ((a == NULL) || (a->data == NULL))
return(0);
if (!no_name && (nid=OBJ_obj2nid(a)) != NID_undef)
{
s=OBJ_nid2ln(nid);
if (s == NULL)
s=OBJ_nid2sn(nid);
if (s)
{
if (buf)
BUF_strlcpy(buf,s,buf_len);
n=strlen(s);
return n;
}
}
len=a->length;
p=a->data;
first = 1;
bl = NULL;
while (len > 0)
{
l=0;
use_bn = 0;
for (;;)
{
unsigned char c = *p++;
len--;
if ((len == 0) && (c & 0x80))
goto err;
if (use_bn)
{
if (!BN_add_word(bl, c & 0x7f))
goto err;
}
else
l |= c & 0x7f;
if (!(c & 0x80))
break;
if (!use_bn && (l > (ULONG_MAX >> 7L)))
{
if (!bl && !(bl = BN_new()))
goto err;
if (!BN_set_word(bl, l))
goto err;
use_bn = 1;
}
if (use_bn)
{
if (!BN_lshift(bl, bl, 7))
goto err;
}
else
l<<=7L;
}
if (first)
{
first = 0;
if (l >= 80)
{
i = 2;
if (use_bn)
{
if (!BN_sub_word(bl, 80))
goto err;
}
else
l -= 80;
}
else
{
i=(int)(l/40);
l-=(long)(i*40);
}
if (buf && (buf_len > 1))
{
*buf++ = i + '0';
*buf = '\0';
buf_len--;
}
n++;
}
if (use_bn)
{
char *bndec;
bndec = BN_bn2dec(bl);
if (!bndec)
goto err;
i = strlen(bndec);
if (buf)
{
if (buf_len > 1)
{
*buf++ = '.';
*buf = '\0';
buf_len--;
}
BUF_strlcpy(buf,bndec,buf_len);
if (i > buf_len)
{
buf += buf_len;
buf_len = 0;
}
else
{
buf+=i;
buf_len-=i;
}
}
n++;
n += i;
OPENSSL_free(bndec);
}
else
{
BIO_snprintf(tbuf,sizeof tbuf,".%lu",l);
i=strlen(tbuf);
if (buf && (buf_len > 0))
{
BUF_strlcpy(buf,tbuf,buf_len);
if (i > buf_len)
{
buf += buf_len;
buf_len = 0;
}
else
{
buf+=i;
buf_len-=i;
}
}
n+=i;
l=0;
}
}
if (bl)
BN_free(bl);
return n;
err:
if (bl)
BN_free(bl);
return -1;
}
| 165,176 |
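Note on the pattern above: the core of this fix is an invariant rather than a single check: the output buffer is NUL-terminated before any work happens, so every early-exit path hands the caller a valid empty string instead of uninitialized stack bytes. A minimal sketch of the same invariant, illustrative rather than OpenSSL's code:
#include <stdio.h>

static int format_arc(char *buf, size_t buf_len, unsigned long arc) {
  if (buf && buf_len > 0)
    buf[0] = '\0';               /* invariant: terminated at every state */
  if (buf == NULL || buf_len < 2)
    return -1;                   /* early exit is now safe for callers */
  return snprintf(buf, buf_len, ".%lu", arc);
}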
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static void get_nb10(ut8* dbg_data, SCV_NB10_HEADER* res) {
const int nb10sz = 16;
memcpy (res, dbg_data, nb10sz);
res->file_name = (ut8*) strdup ((const char*) dbg_data + nb10sz);
}
Commit Message: Fix crash in pe
CWE ID: CWE-125 | static void get_nb10(ut8* dbg_data, SCV_NB10_HEADER* res) {
const int nb10sz = 16;
// memcpy (res, dbg_data, nb10sz);
// res->file_name = (ut8*) strdup ((const char*) dbg_data + nb10sz);
}
| 169,229 |
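Note on the pattern above: strdup() on attacker-controlled bytes assumes a NUL terminator somewhere inside the buffer, which is exactly the CWE-125 over-read here (the minimal upstream fix simply disables the copy). A bounded alternative would verify the terminator first; the helper below is hypothetical, not radare2's API:
#include <stdlib.h>
#include <string.h>

/* remaining must be the caller-verified number of bytes left in dbg_data. */
static char *copy_cstring_bounded(const unsigned char *p, size_t remaining) {
  if (memchr(p, '\0', remaining) == NULL)
    return NULL;                    /* no terminator inside the buffer */
  return strdup((const char *)p);   /* safe: NUL verified in range */
}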
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: l2tp_avp_print(netdissect_options *ndo, const u_char *dat, int length)
{
u_int len;
const uint16_t *ptr = (const uint16_t *)dat;
uint16_t attr_type;
int hidden = FALSE;
if (length <= 0) {
return;
}
ND_PRINT((ndo, " "));
ND_TCHECK(*ptr); /* Flags & Length */
len = EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_LEN_MASK;
/* If it is not long enough to contain the header, we'll give up. */
if (len < 6)
goto trunc;
/* If it goes past the end of the remaining length of the packet,
we'll give up. */
if (len > (u_int)length)
goto trunc;
/* If it goes past the end of the remaining length of the captured
data, we'll give up. */
ND_TCHECK2(*ptr, len);
/* After this point, no need to worry about truncation */
if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_MANDATORY) {
ND_PRINT((ndo, "*"));
}
if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_HIDDEN) {
hidden = TRUE;
ND_PRINT((ndo, "?"));
}
ptr++;
if (EXTRACT_16BITS(ptr)) {
/* Vendor Specific Attribute */
ND_PRINT((ndo, "VENDOR%04x:", EXTRACT_16BITS(ptr))); ptr++;
ND_PRINT((ndo, "ATTR%04x", EXTRACT_16BITS(ptr))); ptr++;
ND_PRINT((ndo, "("));
print_octets(ndo, (const u_char *)ptr, len-6);
ND_PRINT((ndo, ")"));
} else {
/* IETF-defined Attributes */
ptr++;
attr_type = EXTRACT_16BITS(ptr); ptr++;
ND_PRINT((ndo, "%s", tok2str(l2tp_avp2str, "AVP-#%u", attr_type)));
ND_PRINT((ndo, "("));
if (hidden) {
ND_PRINT((ndo, "???"));
} else {
switch (attr_type) {
case L2TP_AVP_MSGTYPE:
l2tp_msgtype_print(ndo, (const u_char *)ptr);
break;
case L2TP_AVP_RESULT_CODE:
l2tp_result_code_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_PROTO_VER:
l2tp_proto_ver_print(ndo, ptr);
break;
case L2TP_AVP_FRAMING_CAP:
l2tp_framing_cap_print(ndo, (const u_char *)ptr);
break;
case L2TP_AVP_BEARER_CAP:
l2tp_bearer_cap_print(ndo, (const u_char *)ptr);
break;
case L2TP_AVP_TIE_BREAKER:
print_octets(ndo, (const u_char *)ptr, 8);
break;
case L2TP_AVP_FIRM_VER:
case L2TP_AVP_ASSND_TUN_ID:
case L2TP_AVP_RECV_WIN_SIZE:
case L2TP_AVP_ASSND_SESS_ID:
print_16bits_val(ndo, ptr);
break;
case L2TP_AVP_HOST_NAME:
case L2TP_AVP_VENDOR_NAME:
case L2TP_AVP_CALLING_NUMBER:
case L2TP_AVP_CALLED_NUMBER:
case L2TP_AVP_SUB_ADDRESS:
case L2TP_AVP_PROXY_AUTH_NAME:
case L2TP_AVP_PRIVATE_GRP_ID:
print_string(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_CHALLENGE:
case L2TP_AVP_INI_RECV_LCP:
case L2TP_AVP_LAST_SENT_LCP:
case L2TP_AVP_LAST_RECV_LCP:
case L2TP_AVP_PROXY_AUTH_CHAL:
case L2TP_AVP_PROXY_AUTH_RESP:
case L2TP_AVP_RANDOM_VECTOR:
print_octets(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_Q931_CC:
l2tp_q931_cc_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_CHALLENGE_RESP:
print_octets(ndo, (const u_char *)ptr, 16);
break;
case L2TP_AVP_CALL_SER_NUM:
case L2TP_AVP_MINIMUM_BPS:
case L2TP_AVP_MAXIMUM_BPS:
case L2TP_AVP_TX_CONN_SPEED:
case L2TP_AVP_PHY_CHANNEL_ID:
case L2TP_AVP_RX_CONN_SPEED:
print_32bits_val(ndo, (const uint32_t *)ptr);
break;
case L2TP_AVP_BEARER_TYPE:
l2tp_bearer_type_print(ndo, (const u_char *)ptr);
break;
case L2TP_AVP_FRAMING_TYPE:
l2tp_framing_type_print(ndo, (const u_char *)ptr);
break;
case L2TP_AVP_PACKET_PROC_DELAY:
l2tp_packet_proc_delay_print(ndo);
break;
case L2TP_AVP_PROXY_AUTH_TYPE:
l2tp_proxy_auth_type_print(ndo, (const u_char *)ptr);
break;
case L2TP_AVP_PROXY_AUTH_ID:
l2tp_proxy_auth_id_print(ndo, (const u_char *)ptr);
break;
case L2TP_AVP_CALL_ERRORS:
l2tp_call_errors_print(ndo, (const u_char *)ptr);
break;
case L2TP_AVP_ACCM:
l2tp_accm_print(ndo, (const u_char *)ptr);
break;
case L2TP_AVP_SEQ_REQUIRED:
break; /* No Attribute Value */
case L2TP_AVP_PPP_DISCON_CC:
l2tp_ppp_discon_cc_print(ndo, (const u_char *)ptr, len-6);
break;
default:
break;
}
}
ND_PRINT((ndo, ")"));
}
l2tp_avp_print(ndo, dat+len, length-len);
return;
trunc:
ND_PRINT((ndo, "|..."));
}
Commit Message: CVE-2017-13006/L2TP: Check whether an AVP's content exceeds the AVP length.
It's not good enough to check whether all the data specified by the AVP
length was captured - you also have to check whether that length is
large enough for all the required data in the AVP.
This fixes a buffer over-read discovered by Yannick Formaggio.
Add a test using the capture file supplied by the reporter(s).
CWE ID: CWE-125 | l2tp_avp_print(netdissect_options *ndo, const u_char *dat, int length)
{
u_int len;
const uint16_t *ptr = (const uint16_t *)dat;
uint16_t attr_type;
int hidden = FALSE;
if (length <= 0) {
return;
}
ND_PRINT((ndo, " "));
ND_TCHECK(*ptr); /* Flags & Length */
len = EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_LEN_MASK;
/* If it is not long enough to contain the header, we'll give up. */
if (len < 6)
goto trunc;
/* If it goes past the end of the remaining length of the packet,
we'll give up. */
if (len > (u_int)length)
goto trunc;
/* If it goes past the end of the remaining length of the captured
data, we'll give up. */
ND_TCHECK2(*ptr, len);
/*
* After this point, we don't need to check whether we go past
* the length of the captured data; however, we *do* need to
* check whether we go past the end of the AVP.
*/
if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_MANDATORY) {
ND_PRINT((ndo, "*"));
}
if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_HIDDEN) {
hidden = TRUE;
ND_PRINT((ndo, "?"));
}
ptr++;
if (EXTRACT_16BITS(ptr)) {
/* Vendor Specific Attribute */
ND_PRINT((ndo, "VENDOR%04x:", EXTRACT_16BITS(ptr))); ptr++;
ND_PRINT((ndo, "ATTR%04x", EXTRACT_16BITS(ptr))); ptr++;
ND_PRINT((ndo, "("));
print_octets(ndo, (const u_char *)ptr, len-6);
ND_PRINT((ndo, ")"));
} else {
/* IETF-defined Attributes */
ptr++;
attr_type = EXTRACT_16BITS(ptr); ptr++;
ND_PRINT((ndo, "%s", tok2str(l2tp_avp2str, "AVP-#%u", attr_type)));
ND_PRINT((ndo, "("));
if (hidden) {
ND_PRINT((ndo, "???"));
} else {
switch (attr_type) {
case L2TP_AVP_MSGTYPE:
l2tp_msgtype_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_RESULT_CODE:
l2tp_result_code_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_PROTO_VER:
l2tp_proto_ver_print(ndo, ptr, len-6);
break;
case L2TP_AVP_FRAMING_CAP:
l2tp_framing_cap_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_BEARER_CAP:
l2tp_bearer_cap_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_TIE_BREAKER:
if (len-6 < 8) {
ND_PRINT((ndo, "AVP too short"));
break;
}
print_octets(ndo, (const u_char *)ptr, 8);
break;
case L2TP_AVP_FIRM_VER:
case L2TP_AVP_ASSND_TUN_ID:
case L2TP_AVP_RECV_WIN_SIZE:
case L2TP_AVP_ASSND_SESS_ID:
if (len-6 < 2) {
ND_PRINT((ndo, "AVP too short"));
break;
}
print_16bits_val(ndo, ptr);
break;
case L2TP_AVP_HOST_NAME:
case L2TP_AVP_VENDOR_NAME:
case L2TP_AVP_CALLING_NUMBER:
case L2TP_AVP_CALLED_NUMBER:
case L2TP_AVP_SUB_ADDRESS:
case L2TP_AVP_PROXY_AUTH_NAME:
case L2TP_AVP_PRIVATE_GRP_ID:
print_string(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_CHALLENGE:
case L2TP_AVP_INI_RECV_LCP:
case L2TP_AVP_LAST_SENT_LCP:
case L2TP_AVP_LAST_RECV_LCP:
case L2TP_AVP_PROXY_AUTH_CHAL:
case L2TP_AVP_PROXY_AUTH_RESP:
case L2TP_AVP_RANDOM_VECTOR:
print_octets(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_Q931_CC:
l2tp_q931_cc_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_CHALLENGE_RESP:
if (len-6 < 16) {
ND_PRINT((ndo, "AVP too short"));
break;
}
print_octets(ndo, (const u_char *)ptr, 16);
break;
case L2TP_AVP_CALL_SER_NUM:
case L2TP_AVP_MINIMUM_BPS:
case L2TP_AVP_MAXIMUM_BPS:
case L2TP_AVP_TX_CONN_SPEED:
case L2TP_AVP_PHY_CHANNEL_ID:
case L2TP_AVP_RX_CONN_SPEED:
if (len-6 < 4) {
ND_PRINT((ndo, "AVP too short"));
break;
}
print_32bits_val(ndo, (const uint32_t *)ptr);
break;
case L2TP_AVP_BEARER_TYPE:
l2tp_bearer_type_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_FRAMING_TYPE:
l2tp_framing_type_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_PACKET_PROC_DELAY:
l2tp_packet_proc_delay_print(ndo);
break;
case L2TP_AVP_PROXY_AUTH_TYPE:
l2tp_proxy_auth_type_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_PROXY_AUTH_ID:
l2tp_proxy_auth_id_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_CALL_ERRORS:
l2tp_call_errors_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_ACCM:
l2tp_accm_print(ndo, (const u_char *)ptr, len-6);
break;
case L2TP_AVP_SEQ_REQUIRED:
break; /* No Attribute Value */
case L2TP_AVP_PPP_DISCON_CC:
l2tp_ppp_discon_cc_print(ndo, (const u_char *)ptr, len-6);
break;
default:
break;
}
}
ND_PRINT((ndo, ")"));
}
l2tp_avp_print(ndo, dat+len, length-len);
return;
trunc:
ND_PRINT((ndo, "|..."));
}
| 167,890 |
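Note on the pattern above: passing the capture-length check is not enough; each fixed-size attribute also needs its declared value length checked against the minimum its type implies. A sketch of that per-type guard, with two example minimums taken from RFC 2661 and the rest treated as variable-length:
#include <stdint.h>
#include <stddef.h>

static int avp_value_len_ok(uint16_t attr_type, size_t value_len) {
  switch (attr_type) {
  case 9:   /* Assigned Tunnel ID: 16-bit value */
    return value_len >= 2;
  case 5:   /* Tie Breaker: 8 octets */
    return value_len >= 8;
  default:  /* variable-length attributes */
    return 1;
  }
}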
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static inline void uipc_wakeup_locked(void)
{
char sig_on = 1;
BTIF_TRACE_EVENT("UIPC SEND WAKE UP");
send(uipc_main.signal_fds[1], &sig_on, sizeof(sig_on), 0);
}
Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
CWE ID: CWE-284 | static inline void uipc_wakeup_locked(void)
{
char sig_on = 1;
BTIF_TRACE_EVENT("UIPC SEND WAKE UP");
TEMP_FAILURE_RETRY(send(uipc_main.signal_fds[1], &sig_on, sizeof(sig_on), 0));
}
| 173,499 |
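Note on the pattern above: a signal delivered while send() is blocked makes it fail with errno == EINTR, so without a retry the one-byte wakeup is silently lost and the peer can stall (the denial of service in the commit message). TEMP_FAILURE_RETRY is glibc/Bionic's wrapper for exactly this loop; a portable restatement:
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t send_retry_eintr(int fd, const void *buf, size_t len, int flags) {
  ssize_t rc;
  do {
    rc = send(fd, buf, len, flags);
  } while (rc == -1 && errno == EINTR);
  return rc;
}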
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: TestCompletionCallback()
: callback_(base::Bind(&TestCompletionCallback::SetResult,
base::Unretained(this))) {}
Commit Message: Update helper classes in usb_device_handle_unittest for OnceCallback
Helper classes in usb_device_handle_unittest.cc don't fit to OnceCallback
migration, as they are copied and passed to others.
This CL updates them to pass new callbacks for each use to avoid the
copy of callbacks.
Bug: 714018
Change-Id: Ifb70901439ae92b6b049b84534283c39ebc40ee0
Reviewed-on: https://chromium-review.googlesource.com/527549
Reviewed-by: Ken Rockot <[email protected]>
Commit-Queue: Taiju Tsuiki <[email protected]>
Cr-Commit-Position: refs/heads/master@{#478549}
CWE ID: | TestCompletionCallback()
| 171,975 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: void WebContentsImpl::DidProceedOnInterstitial() {
DCHECK(!(ShowingInterstitialPage() &&
GetRenderManager()->interstitial_page()->pause_throbber()));
if (ShowingInterstitialPage() && frame_tree_.IsLoading())
LoadingStateChanged(true, true, nullptr);
}
Commit Message: Don't show current RenderWidgetHostView while interstitial is showing.
Also moves interstitial page tracking from RenderFrameHostManager to
WebContents, since interstitial pages are not frame-specific. This was
necessary for subframes to detect if an interstitial page is showing.
BUG=729105
TEST=See comment 13 of bug for repro steps
CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.linux:linux_site_isolation
Review-Url: https://codereview.chromium.org/2938313002
Cr-Commit-Position: refs/heads/master@{#480117}
CWE ID: CWE-20 | void WebContentsImpl::DidProceedOnInterstitial() {
DCHECK(!(ShowingInterstitialPage() && interstitial_page_->pause_throbber()));
if (ShowingInterstitialPage() && frame_tree_.IsLoading())
LoadingStateChanged(true, true, nullptr);
}
| 172,326 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
int r;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
struct kvm_memslots *slots, *old_memslots;
r = -EINVAL;
/* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1))
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
goto out;
if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
memslot = &kvm->memslots->memslots[mem->slot];
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
r = -EINVAL;
if (npages > KVM_MEM_MAX_NR_PAGES)
goto out;
if (!npages)
mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
new = old = *memslot;
new.id = mem->slot;
new.base_gfn = base_gfn;
new.npages = npages;
new.flags = mem->flags;
/* Disallow changing a memory slot's size. */
r = -EINVAL;
if (npages && old.npages && npages != old.npages)
goto out_free;
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
if (s == memslot || !s->npages)
continue;
if (!((base_gfn + npages <= s->base_gfn) ||
(base_gfn >= s->base_gfn + s->npages)))
goto out_free;
}
/* Free page dirty bitmap if unneeded */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL;
r = -ENOMEM;
/* Allocate if a slot is being created */
#ifndef CONFIG_S390
if (npages && !new.rmap) {
new.rmap = vzalloc(npages * sizeof(*new.rmap));
if (!new.rmap)
goto out_free;
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
}
if (!npages)
goto skip_lpage;
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
unsigned long ugfn;
unsigned long j;
int lpages;
int level = i + 2;
/* Avoid unused variable warning if no large pages */
(void)level;
if (new.lpage_info[i])
continue;
lpages = 1 + ((base_gfn + npages - 1)
>> KVM_HPAGE_GFN_SHIFT(level));
lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
if (!new.lpage_info[i])
goto out_free;
if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
new.lpage_info[i][0].write_count = 1;
if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
new.lpage_info[i][lpages - 1].write_count = 1;
ugfn = new.userspace_addr >> PAGE_SHIFT;
/*
* If the gfn and userspace address are not aligned wrt each
* other, or if explicitly asked to, disable large page
* support for this slot
*/
if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
!largepages_enabled)
for (j = 0; j < lpages; ++j)
new.lpage_info[i][j].write_count = 1;
}
skip_lpage:
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
/* destroy any largepage mappings for dirty tracking */
}
#else /* not defined CONFIG_S390 */
new.user_alloc = user_alloc;
if (user_alloc)
new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */
if (!npages) {
r = -ENOMEM;
slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!slots)
goto out_free;
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
if (mem->slot >= slots->nmemslots)
slots->nmemslots = mem->slot + 1;
slots->generation++;
slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
/* From this point no new shadow pages pointing to a deleted
* memslot will be created.
*
* validation of sp->gfn happens in:
* - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow(kvm);
kfree(old_memslots);
}
r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
if (r)
goto out_free;
/* map the pages in iommu page table */
if (npages) {
r = kvm_iommu_map_pages(kvm, &new);
if (r)
goto out_free;
}
r = -ENOMEM;
slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!slots)
goto out_free;
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
if (mem->slot >= slots->nmemslots)
slots->nmemslots = mem->slot + 1;
slots->generation++;
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (!npages) {
new.rmap = NULL;
new.dirty_bitmap = NULL;
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
new.lpage_info[i] = NULL;
}
slots->memslots[mem->slot] = new;
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
kvm_free_physmem_slot(&old, &new);
kfree(old_memslots);
return 0;
out_free:
kvm_free_physmem_slot(&new, &old);
out:
return r;
}
Commit Message: KVM: Validate userspace_addr of memslot when registered
This way, we can avoid checking the user space address many times when
we read the guest memory.
Although we can do the same for write if we check which slots are
writable, we do not care write now: reading the guest memory happens
more often than writing.
[avi: change VERIFY_READ to VERIFY_WRITE]
Signed-off-by: Takuya Yoshikawa <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
CWE ID: CWE-20 | int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
int r;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
struct kvm_memslots *slots, *old_memslots;
r = -EINVAL;
/* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1))
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
/* We can read the guest memory with __xxx_user() later on. */
if (user_alloc &&
((mem->userspace_addr & (PAGE_SIZE - 1)) ||
!access_ok(VERIFY_WRITE, mem->userspace_addr, mem->memory_size)))
goto out;
if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
memslot = &kvm->memslots->memslots[mem->slot];
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
r = -EINVAL;
if (npages > KVM_MEM_MAX_NR_PAGES)
goto out;
if (!npages)
mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
new = old = *memslot;
new.id = mem->slot;
new.base_gfn = base_gfn;
new.npages = npages;
new.flags = mem->flags;
/* Disallow changing a memory slot's size. */
r = -EINVAL;
if (npages && old.npages && npages != old.npages)
goto out_free;
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
if (s == memslot || !s->npages)
continue;
if (!((base_gfn + npages <= s->base_gfn) ||
(base_gfn >= s->base_gfn + s->npages)))
goto out_free;
}
/* Free page dirty bitmap if unneeded */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL;
r = -ENOMEM;
/* Allocate if a slot is being created */
#ifndef CONFIG_S390
if (npages && !new.rmap) {
new.rmap = vzalloc(npages * sizeof(*new.rmap));
if (!new.rmap)
goto out_free;
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
}
if (!npages)
goto skip_lpage;
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
unsigned long ugfn;
unsigned long j;
int lpages;
int level = i + 2;
/* Avoid unused variable warning if no large pages */
(void)level;
if (new.lpage_info[i])
continue;
lpages = 1 + ((base_gfn + npages - 1)
>> KVM_HPAGE_GFN_SHIFT(level));
lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
if (!new.lpage_info[i])
goto out_free;
if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
new.lpage_info[i][0].write_count = 1;
if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
new.lpage_info[i][lpages - 1].write_count = 1;
ugfn = new.userspace_addr >> PAGE_SHIFT;
/*
* If the gfn and userspace address are not aligned wrt each
* other, or if explicitly asked to, disable large page
* support for this slot
*/
if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
!largepages_enabled)
for (j = 0; j < lpages; ++j)
new.lpage_info[i][j].write_count = 1;
}
skip_lpage:
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
/* destroy any largepage mappings for dirty tracking */
}
#else /* not defined CONFIG_S390 */
new.user_alloc = user_alloc;
if (user_alloc)
new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */
if (!npages) {
r = -ENOMEM;
slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!slots)
goto out_free;
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
if (mem->slot >= slots->nmemslots)
slots->nmemslots = mem->slot + 1;
slots->generation++;
slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
/* From this point no new shadow pages pointing to a deleted
* memslot will be created.
*
* validation of sp->gfn happens in:
* - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow(kvm);
kfree(old_memslots);
}
r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
if (r)
goto out_free;
/* map the pages in iommu page table */
if (npages) {
r = kvm_iommu_map_pages(kvm, &new);
if (r)
goto out_free;
}
r = -ENOMEM;
slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!slots)
goto out_free;
memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
if (mem->slot >= slots->nmemslots)
slots->nmemslots = mem->slot + 1;
slots->generation++;
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (!npages) {
new.rmap = NULL;
new.dirty_bitmap = NULL;
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
new.lpage_info[i] = NULL;
}
slots->memslots[mem->slot] = new;
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
kvm_free_physmem_slot(&old, &new);
kfree(old_memslots);
return 0;
out_free:
kvm_free_physmem_slot(&new, &old);
out:
return r;
}
| 166,099 |
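Note on the pattern above: the fix validates the user-supplied address once, at slot registration, so later unchecked __copy_{from,to}_user-style accesses can trust it. The essential shape is alignment, overflow, then range; user_range_ok() below stands in for the kernel's access_ok() and the code is illustrative only:
#include <stdint.h>

#define PAGE_SIZE 4096UL

extern int user_range_ok(uint64_t addr, uint64_t size);  /* access_ok() stand-in */

static int memslot_addr_valid(uint64_t userspace_addr, uint64_t memory_size) {
  if (userspace_addr & (PAGE_SIZE - 1))
    return 0;                               /* must be page-aligned */
  if (userspace_addr + memory_size < userspace_addr)
    return 0;                               /* wrap-around overflow */
  return user_range_ok(userspace_addr, memory_size);
}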
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: bool SoftVPX::outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset) {
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
BufferInfo *outInfo = NULL;
OMX_BUFFERHEADERTYPE *outHeader = NULL;
vpx_codec_iter_t iter = NULL;
if (flushDecoder && mFrameParallelMode) {
if (vpx_codec_decode((vpx_codec_ctx_t *)mCtx, NULL, 0, NULL, 0)) {
ALOGE("Failed to flush on2 decoder.");
return false;
}
}
if (!display) {
if (!flushDecoder) {
ALOGE("Invalid operation.");
return false;
}
while ((mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter))) {
}
return true;
}
while (!outQueue.empty()) {
if (mImg == NULL) {
mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
if (mImg == NULL) {
break;
}
}
uint32_t width = mImg->d_w;
uint32_t height = mImg->d_h;
outInfo = *outQueue.begin();
outHeader = outInfo->mHeader;
CHECK_EQ(mImg->fmt, VPX_IMG_FMT_I420);
handlePortSettingsChange(portWillReset, width, height);
if (*portWillReset) {
return true;
}
outHeader->nOffset = 0;
outHeader->nFlags = 0;
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
uint8_t *dst = outHeader->pBuffer;
const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
const uint8_t *srcV = (const uint8_t *)mImg->planes[VPX_PLANE_V];
size_t srcYStride = mImg->stride[VPX_PLANE_Y];
size_t srcUStride = mImg->stride[VPX_PLANE_U];
size_t srcVStride = mImg->stride[VPX_PLANE_V];
copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
mImg = NULL;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
notifyFillBufferDone(outHeader);
outHeader = NULL;
}
if (!eos) {
return true;
}
if (!outQueue.empty()) {
outInfo = *outQueue.begin();
outQueue.erase(outQueue.begin());
outHeader = outInfo->mHeader;
outHeader->nTimeStamp = 0;
outHeader->nFilledLen = 0;
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
outInfo->mOwnedByUs = false;
notifyFillBufferDone(outHeader);
mEOSStatus = OUTPUT_FRAMES_FLUSHED;
}
return true;
}
Commit Message: Add VPX output buffer size check
and handle dead observers more gracefully
Bug: 27597103
Change-Id: Id7acb25d5ef69b197da15ec200a9e4f9e7b03518
CWE ID: CWE-264 | bool SoftVPX::outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset) {
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
BufferInfo *outInfo = NULL;
OMX_BUFFERHEADERTYPE *outHeader = NULL;
vpx_codec_iter_t iter = NULL;
if (flushDecoder && mFrameParallelMode) {
if (vpx_codec_decode((vpx_codec_ctx_t *)mCtx, NULL, 0, NULL, 0)) {
ALOGE("Failed to flush on2 decoder.");
return false;
}
}
if (!display) {
if (!flushDecoder) {
ALOGE("Invalid operation.");
return false;
}
while ((mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter))) {
}
return true;
}
while (!outQueue.empty()) {
if (mImg == NULL) {
mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
if (mImg == NULL) {
break;
}
}
uint32_t width = mImg->d_w;
uint32_t height = mImg->d_h;
outInfo = *outQueue.begin();
outHeader = outInfo->mHeader;
CHECK_EQ(mImg->fmt, VPX_IMG_FMT_I420);
handlePortSettingsChange(portWillReset, width, height);
if (*portWillReset) {
return true;
}
outHeader->nOffset = 0;
outHeader->nFlags = 0;
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
if (outHeader->nAllocLen >= outHeader->nFilledLen) {
uint8_t *dst = outHeader->pBuffer;
const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
const uint8_t *srcV = (const uint8_t *)mImg->planes[VPX_PLANE_V];
size_t srcYStride = mImg->stride[VPX_PLANE_Y];
size_t srcUStride = mImg->stride[VPX_PLANE_U];
size_t srcVStride = mImg->stride[VPX_PLANE_V];
copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
} else {
ALOGE("b/27597103, buffer too small");
android_errorWriteLog(0x534e4554, "27597103");
outHeader->nFilledLen = 0;
}
mImg = NULL;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
notifyFillBufferDone(outHeader);
outHeader = NULL;
}
if (!eos) {
return true;
}
if (!outQueue.empty()) {
outInfo = *outQueue.begin();
outQueue.erase(outQueue.begin());
outHeader = outInfo->mHeader;
outHeader->nTimeStamp = 0;
outHeader->nFilledLen = 0;
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
outInfo->mOwnedByUs = false;
notifyFillBufferDone(outHeader);
mEOSStatus = OUTPUT_FRAMES_FLUSHED;
}
return true;
}
| 173,880 |
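Note on the pattern above: the decoder sets nFilledLen from the frame geometry and must refuse to copy when the client-supplied buffer's nAllocLen is smaller, rather than trusting it implicitly. The guard reduces to one comparison before the copy; illustrative, not the OMX component's code:
#include <stdint.h>
#include <string.h>

static int copy_frame_checked(uint8_t *dst, size_t dst_alloc_len,
                              const uint8_t *src, size_t frame_bytes) {
  if (dst_alloc_len < frame_bytes)
    return -1;                  /* undersized buffer: skip the copy */
  memcpy(dst, src, frame_bytes);
  return 0;
}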
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static int jpc_ppm_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_ppm_t *ppm = &ms->parms.ppm;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
if (JAS_CAST(uint, jas_stream_write(out, (char *) ppm->data, ppm->len)) != ppm->len) {
return -1;
}
return 0;
}
Commit Message: The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log message were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
CWE ID: CWE-190 | static int jpc_ppm_putparms(jpc_ms_t *ms, jpc_cstate_t *cstate, jas_stream_t *out)
{
jpc_ppm_t *ppm = &ms->parms.ppm;
/* Eliminate compiler warning about unused variables. */
cstate = 0;
if (JAS_CAST(jas_uint, jas_stream_write(out, (char *) ppm->data, ppm->len)) != ppm->len) {
return -1;
}
return 0;
}
| 168,718 |
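Note on the pattern above: the change only renames the cast target (uint to jas_uint) to stop relying on a non-standard global typedef, but the idiom is worth isolating: casting a negative stream-write result to unsigned yields a huge value that still fails the equality test, so the cast silences the signed/unsigned comparison warning without weakening the error check. A small, self-contained illustration (my_uint stands in for jas_uint):
#include <stdio.h>

typedef unsigned int my_uint;  /* stands in for jas_uint */

static int write_all_ok(long written, my_uint want) {
  /* written == -1 becomes a huge unsigned value, so != want still holds. */
  return (my_uint)written == want;
}

int main(void) {
  printf("%d %d\n", write_all_ok(-1, 16), write_all_ok(16, 16));  /* prints 0 1 */
  return 0;
}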
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation. | Code: static Image *ReadYUVImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*chroma_image,
*image,
*resize_image;
InterlaceType
interlace;
MagickBooleanType
status;
register const Quantum
*chroma_pixels;
register ssize_t
x;
register Quantum
*q;
register unsigned char
*p;
ssize_t
count,
horizontal_factor,
vertical_factor,
y;
size_t
length,
quantum;
unsigned char
*scanline;
/*
Allocate image structure.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(OptionError,"MustSpecifyImageSize");
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
quantum=(ssize_t) (image->depth <= 8 ? 1 : 2);
interlace=image_info->interlace;
horizontal_factor=2;
vertical_factor=2;
if (image_info->sampling_factor != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(image_info->sampling_factor,&geometry_info);
horizontal_factor=(ssize_t) geometry_info.rho;
vertical_factor=(ssize_t) geometry_info.sigma;
if ((flags & SigmaValue) == 0)
vertical_factor=horizontal_factor;
if ((horizontal_factor != 1) && (horizontal_factor != 2) &&
(vertical_factor != 1) && (vertical_factor != 2))
ThrowReaderException(CorruptImageError,"UnexpectedSamplingFactor");
}
if ((interlace == UndefinedInterlace) ||
((interlace == NoInterlace) && (vertical_factor == 2)))
{
interlace=NoInterlace; /* CCIR 4:2:2 */
if (vertical_factor == 2)
interlace=PlaneInterlace; /* CCIR 4:1:1 */
}
if (interlace != PartitionInterlace)
{
/*
Open image file.
*/
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,(MagickSizeType) image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
/*
Allocate memory for a scanline.
*/
if (interlace == NoInterlace)
scanline=(unsigned char *) AcquireQuantumMemory((size_t) (2UL*
image->columns+2UL),(size_t) quantum*sizeof(*scanline));
else
scanline=(unsigned char *) AcquireQuantumMemory(image->columns,
(size_t) quantum*sizeof(*scanline));
if (scanline == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
status=MagickTrue;
do
{
chroma_image=CloneImage(image,(image->columns+horizontal_factor-1)/
horizontal_factor,(image->rows+vertical_factor-1)/vertical_factor,
MagickTrue,exception);
if (chroma_image == (Image *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/*
Convert raster image to pixel packets.
*/
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
break;
if (interlace == PartitionInterlace)
{
AppendImageFormat("Y",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*chroma_pixels;
if (interlace == NoInterlace)
{
if ((y > 0) || (GetPreviousImageInList(image) == (Image *) NULL))
{
length=2*quantum*image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
p=scanline;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
chroma_pixels=QueueAuthenticPixels(chroma_image,0,y,
chroma_image->columns,1,exception);
if (chroma_pixels == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x+=2)
{
SetPixelRed(chroma_image,0,chroma_pixels);
if (quantum == 1)
SetPixelGreen(chroma_image,ScaleCharToQuantum(*p++),
chroma_pixels);
else
{
SetPixelGreen(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),chroma_pixels);
p+=2;
}
if (quantum == 1)
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelRed(image,ScaleShortToQuantum(((*p) << 8) | *(p+1)),q);
p+=2;
}
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
q+=GetPixelChannels(image);
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
if (quantum == 1)
SetPixelBlue(chroma_image,ScaleCharToQuantum(*p++),chroma_pixels);
else
{
SetPixelBlue(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),chroma_pixels);
p+=2;
}
if (quantum == 1)
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelRed(image,ScaleShortToQuantum(((*p) << 8) | *(p+1)),q);
p+=2;
}
chroma_pixels+=GetPixelChannels(chroma_image);
q+=GetPixelChannels(image);
}
}
else
{
if ((y > 0) || (GetPreviousImageInList(image) == (Image *) NULL))
{
length=quantum*image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
p=scanline;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (quantum == 1)
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelRed(image,ScaleShortToQuantum(((*p) << 8) | *(p+1)),q);
p+=2;
}
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
q+=GetPixelChannels(image);
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (interlace == NoInterlace)
if (SyncAuthenticPixels(chroma_image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (interlace == PartitionInterlace)
{
(void) CloseBlob(image);
AppendImageFormat("U",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
}
if (interlace != NoInterlace)
{
for (y=0; y < (ssize_t) chroma_image->rows; y++)
{
length=quantum*chroma_image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
p=scanline;
q=QueueAuthenticPixels(chroma_image,0,y,chroma_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) chroma_image->columns; x++)
{
SetPixelRed(chroma_image,0,q);
if (quantum == 1)
SetPixelGreen(chroma_image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelGreen(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),q);
p+=2;
}
SetPixelBlue(chroma_image,0,q);
q+=GetPixelChannels(chroma_image);
}
if (SyncAuthenticPixels(chroma_image,exception) == MagickFalse)
break;
}
if (interlace == PartitionInterlace)
{
(void) CloseBlob(image);
AppendImageFormat("V",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
}
for (y=0; y < (ssize_t) chroma_image->rows; y++)
{
length=quantum*chroma_image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
p=scanline;
q=GetAuthenticPixels(chroma_image,0,y,chroma_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) chroma_image->columns; x++)
{
if (quantum == 1)
SetPixelBlue(chroma_image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelBlue(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),q);
p+=2;
}
q+=GetPixelChannels(chroma_image);
}
if (SyncAuthenticPixels(chroma_image,exception) == MagickFalse)
break;
}
}
/*
Scale image.
*/
resize_image=ResizeImage(chroma_image,image->columns,image->rows,
TriangleFilter,exception);
chroma_image=DestroyImage(chroma_image);
if (resize_image == (Image *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
chroma_pixels=GetVirtualPixels(resize_image,0,y,resize_image->columns,1,
exception);
if ((q == (Quantum *) NULL) ||
(chroma_pixels == (const Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(image,GetPixelGreen(resize_image,chroma_pixels),q);
SetPixelBlue(image,GetPixelBlue(resize_image,chroma_pixels),q);
chroma_pixels+=GetPixelChannels(resize_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
resize_image=DestroyImage(resize_image);
if (SetImageColorspace(image,YCbCrColorspace,exception) == MagickFalse)
break;
if (interlace == PartitionInterlace)
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (interlace == NoInterlace)
count=ReadBlob(image,(size_t) (2*quantum*image->columns),scanline);
else
count=ReadBlob(image,(size_t) quantum*image->columns,scanline);
if (count != 0)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (count != 0);
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
(void) CloseBlob(image);
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
}
Commit Message: fix multiple memory leak in ReadYUVImage
CWE ID: CWE-772 | static Image *ReadYUVImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*chroma_image,
*image,
*resize_image;
InterlaceType
interlace;
MagickBooleanType
status;
register const Quantum
*chroma_pixels;
register ssize_t
x;
register Quantum
*q;
register unsigned char
*p;
ssize_t
count,
horizontal_factor,
vertical_factor,
y;
size_t
length,
quantum;
unsigned char
*scanline;
/*
Allocate image structure.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(OptionError,"MustSpecifyImageSize");
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
quantum=(ssize_t) (image->depth <= 8 ? 1 : 2);
interlace=image_info->interlace;
horizontal_factor=2;
vertical_factor=2;
if (image_info->sampling_factor != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(image_info->sampling_factor,&geometry_info);
horizontal_factor=(ssize_t) geometry_info.rho;
vertical_factor=(ssize_t) geometry_info.sigma;
if ((flags & SigmaValue) == 0)
vertical_factor=horizontal_factor;
if ((horizontal_factor != 1) && (horizontal_factor != 2) &&
(vertical_factor != 1) && (vertical_factor != 2))
ThrowReaderException(CorruptImageError,"UnexpectedSamplingFactor");
}
if ((interlace == UndefinedInterlace) ||
((interlace == NoInterlace) && (vertical_factor == 2)))
{
interlace=NoInterlace; /* CCIR 4:2:2 */
if (vertical_factor == 2)
interlace=PlaneInterlace; /* CCIR 4:1:1 */
}
if (interlace != PartitionInterlace)
{
/*
Open image file.
*/
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,(MagickSizeType) image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
/*
Allocate memory for a scanline.
*/
if (interlace == NoInterlace)
scanline=(unsigned char *) AcquireQuantumMemory((size_t) (2UL*
image->columns+2UL),(size_t) quantum*sizeof(*scanline));
else
scanline=(unsigned char *) AcquireQuantumMemory(image->columns,
(size_t) quantum*sizeof(*scanline));
if (scanline == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
status=MagickTrue;
do
{
chroma_image=CloneImage(image,(image->columns+horizontal_factor-1)/
horizontal_factor,(image->rows+vertical_factor-1)/vertical_factor,
MagickTrue,exception);
if (chroma_image == (Image *) NULL)
{
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Convert raster image to pixel packets.
*/
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
break;
if (interlace == PartitionInterlace)
{
AppendImageFormat("Y",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
image=DestroyImageList(image);
return((Image *) NULL);
}
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*chroma_pixels;
if (interlace == NoInterlace)
{
if ((y > 0) || (GetPreviousImageInList(image) == (Image *) NULL))
{
length=2*quantum*image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
p=scanline;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
chroma_pixels=QueueAuthenticPixels(chroma_image,0,y,
chroma_image->columns,1,exception);
if (chroma_pixels == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x+=2)
{
SetPixelRed(chroma_image,0,chroma_pixels);
if (quantum == 1)
SetPixelGreen(chroma_image,ScaleCharToQuantum(*p++),
chroma_pixels);
else
{
SetPixelGreen(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),chroma_pixels);
p+=2;
}
if (quantum == 1)
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelRed(image,ScaleShortToQuantum(((*p) << 8) | *(p+1)),q);
p+=2;
}
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
q+=GetPixelChannels(image);
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
if (quantum == 1)
SetPixelBlue(chroma_image,ScaleCharToQuantum(*p++),chroma_pixels);
else
{
SetPixelBlue(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),chroma_pixels);
p+=2;
}
if (quantum == 1)
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelRed(image,ScaleShortToQuantum(((*p) << 8) | *(p+1)),q);
p+=2;
}
chroma_pixels+=GetPixelChannels(chroma_image);
q+=GetPixelChannels(image);
}
}
else
{
if ((y > 0) || (GetPreviousImageInList(image) == (Image *) NULL))
{
length=quantum*image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
p=scanline;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (quantum == 1)
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelRed(image,ScaleShortToQuantum(((*p) << 8) | *(p+1)),q);
p+=2;
}
SetPixelGreen(image,0,q);
SetPixelBlue(image,0,q);
q+=GetPixelChannels(image);
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (interlace == NoInterlace)
if (SyncAuthenticPixels(chroma_image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if (interlace == PartitionInterlace)
{
(void) CloseBlob(image);
AppendImageFormat("U",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
image=DestroyImageList(image);
return((Image *) NULL);
}
}
if (interlace != NoInterlace)
{
for (y=0; y < (ssize_t) chroma_image->rows; y++)
{
length=quantum*chroma_image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
p=scanline;
q=QueueAuthenticPixels(chroma_image,0,y,chroma_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) chroma_image->columns; x++)
{
SetPixelRed(chroma_image,0,q);
if (quantum == 1)
SetPixelGreen(chroma_image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelGreen(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),q);
p+=2;
}
SetPixelBlue(chroma_image,0,q);
q+=GetPixelChannels(chroma_image);
}
if (SyncAuthenticPixels(chroma_image,exception) == MagickFalse)
break;
}
if (interlace == PartitionInterlace)
{
(void) CloseBlob(image);
AppendImageFormat("V",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
image=DestroyImageList(image);
return((Image *) NULL);
}
}
for (y=0; y < (ssize_t) chroma_image->rows; y++)
{
length=quantum*chroma_image->columns;
count=ReadBlob(image,length,scanline);
if (count != (ssize_t) length)
{
status=MagickFalse;
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
p=scanline;
q=GetAuthenticPixels(chroma_image,0,y,chroma_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) chroma_image->columns; x++)
{
if (quantum == 1)
SetPixelBlue(chroma_image,ScaleCharToQuantum(*p++),q);
else
{
SetPixelBlue(chroma_image,ScaleShortToQuantum(((*p) << 8) |
*(p+1)),q);
p+=2;
}
q+=GetPixelChannels(chroma_image);
}
if (SyncAuthenticPixels(chroma_image,exception) == MagickFalse)
break;
}
}
/*
Scale image.
*/
resize_image=ResizeImage(chroma_image,image->columns,image->rows,
TriangleFilter,exception);
chroma_image=DestroyImage(chroma_image);
if (resize_image == (Image *) NULL)
{
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
chroma_pixels=GetVirtualPixels(resize_image,0,y,resize_image->columns,1,
exception);
if ((q == (Quantum *) NULL) ||
(chroma_pixels == (const Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(image,GetPixelGreen(resize_image,chroma_pixels),q);
SetPixelBlue(image,GetPixelBlue(resize_image,chroma_pixels),q);
chroma_pixels+=GetPixelChannels(resize_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
resize_image=DestroyImage(resize_image);
if (SetImageColorspace(image,YCbCrColorspace,exception) == MagickFalse)
break;
if (interlace == PartitionInterlace)
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (interlace == NoInterlace)
count=ReadBlob(image,(size_t) (2*quantum*image->columns),scanline);
else
count=ReadBlob(image,(size_t) quantum*image->columns,scanline);
if (count != 0)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (count != 0);
scanline=(unsigned char *) RelinquishMagickMemory(scanline);
(void) CloseBlob(image);
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
}
| 167,738 |
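The fix above addresses CWE-772 (missing release of a resource after its effective lifetime): the patched reader releases the scanline buffer on every early-exit path, not only at the normal end of the function. A minimal sketch of that pattern, using hypothetical helper names rather than ImageMagick's API:

#include <stdlib.h>

extern int open_plane(void);   /* hypothetical helper, stands in for OpenBlob etc. */

int read_rows(void)
{
  unsigned char *scanline = (unsigned char *) malloc(4096);
  if (scanline == NULL)
    return -1;
  if (open_plane() != 0)
    {
      free(scanline);          /* this release was missing before the fix */
      return -1;
    }
  /* ... consume scanline ... */
  free(scanline);
  return 0;
}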
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void StopCastCallback(
CastConfigDelegate* cast_config,
const CastConfigDelegate::ReceiversAndActivites& receivers_activities) {
for (auto& item : receivers_activities) {
CastConfigDelegate::Activity activity = item.second.activity;
if (activity.allow_stop && activity.id.empty() == false)
cast_config->StopCasting(activity.id);
}
}
Commit Message: Allow the cast tray to function as expected when the installed extension is missing API methods.
BUG=489445
Review URL: https://codereview.chromium.org/1145833003
Cr-Commit-Position: refs/heads/master@{#330663}
CWE ID: CWE-79 | void StopCastCallback(
| 171,626 |
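The output column of this row stops after the signature, so the actual Chromium change is not shown; the commit message only says the tray must keep working when the installed extension lacks some API methods. One plausible shape of that hardening, sketched with illustrative C types and not taken from the real patch, is a null check before each optional call:

struct cast_api {
  void (*stop_casting)(const char *activity_id);   /* may be absent */
};

void stop_activity(const struct cast_api *api, const char *activity_id)
{
  if (api != NULL && api->stop_casting != NULL)
    api->stop_casting(activity_id);
}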
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int yr_execute_code(
YR_RULES* rules,
YR_SCAN_CONTEXT* context,
int timeout,
time_t start_time)
{
int64_t mem[MEM_SIZE];
int32_t sp = 0;
uint8_t* ip = rules->code_start;
YR_VALUE args[MAX_FUNCTION_ARGS];
YR_VALUE *stack;
YR_VALUE r1;
YR_VALUE r2;
YR_VALUE r3;
#ifdef PROFILING_ENABLED
YR_RULE* current_rule = NULL;
#endif
YR_RULE* rule;
YR_MATCH* match;
YR_OBJECT_FUNCTION* function;
char* identifier;
char* args_fmt;
int i;
int found;
int count;
int result = ERROR_SUCCESS;
int stop = FALSE;
int cycle = 0;
int tidx = context->tidx;
int stack_size;
#ifdef PROFILING_ENABLED
clock_t start = clock();
#endif
yr_get_configuration(YR_CONFIG_STACK_SIZE, (void*) &stack_size);
stack = (YR_VALUE*) yr_malloc(stack_size * sizeof(YR_VALUE));
if (stack == NULL)
return ERROR_INSUFFICIENT_MEMORY;
while(!stop)
{
switch(*ip)
{
case OP_HALT:
assert(sp == 0); // When HALT is reached the stack should be empty.
stop = TRUE;
break;
case OP_PUSH:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
push(r1);
break;
case OP_POP:
pop(r1);
break;
case OP_CLEAR_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
mem[r1.i] = 0;
break;
case OP_ADD_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
if (!is_undef(r2))
mem[r1.i] += r2.i;
break;
case OP_INCR_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
mem[r1.i]++;
break;
case OP_PUSH_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
r1.i = mem[r1.i];
push(r1);
break;
case OP_POP_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
mem[r1.i] = r2.i;
break;
case OP_SWAPUNDEF:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
if (is_undef(r2))
{
r1.i = mem[r1.i];
push(r1);
}
else
{
push(r2);
}
break;
case OP_JNUNDEF:
pop(r1);
push(r1);
ip = jmp_if(!is_undef(r1), ip);
break;
case OP_JLE:
pop(r2);
pop(r1);
push(r1);
push(r2);
ip = jmp_if(r1.i <= r2.i, ip);
break;
case OP_JTRUE:
pop(r1);
push(r1);
ip = jmp_if(!is_undef(r1) && r1.i, ip);
break;
case OP_JFALSE:
pop(r1);
push(r1);
ip = jmp_if(is_undef(r1) || !r1.i, ip);
break;
case OP_AND:
pop(r2);
pop(r1);
if (is_undef(r1) || is_undef(r2))
r1.i = 0;
else
r1.i = r1.i && r2.i;
push(r1);
break;
case OP_OR:
pop(r2);
pop(r1);
if (is_undef(r1))
{
push(r2);
}
else if (is_undef(r2))
{
push(r1);
}
else
{
r1.i = r1.i || r2.i;
push(r1);
}
break;
case OP_NOT:
pop(r1);
if (is_undef(r1))
r1.i = UNDEFINED;
else
r1.i= !r1.i;
push(r1);
break;
case OP_MOD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r2.i != 0)
r1.i = r1.i % r2.i;
else
r1.i = UNDEFINED;
push(r1);
break;
case OP_SHR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i >> r2.i;
push(r1);
break;
case OP_SHL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i << r2.i;
push(r1);
break;
case OP_BITWISE_NOT:
pop(r1);
ensure_defined(r1);
r1.i = ~r1.i;
push(r1);
break;
case OP_BITWISE_AND:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i & r2.i;
push(r1);
break;
case OP_BITWISE_OR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i | r2.i;
push(r1);
break;
case OP_BITWISE_XOR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i ^ r2.i;
push(r1);
break;
case OP_PUSH_RULE:
rule = *(YR_RULE**)(ip + 1);
ip += sizeof(uint64_t);
r1.i = rule->t_flags[tidx] & RULE_TFLAGS_MATCH ? 1 : 0;
push(r1);
break;
case OP_INIT_RULE:
#ifdef PROFILING_ENABLED
current_rule = *(YR_RULE**)(ip + 1);
#endif
ip += sizeof(uint64_t);
break;
case OP_MATCH_RULE:
pop(r1);
rule = *(YR_RULE**)(ip + 1);
ip += sizeof(uint64_t);
if (!is_undef(r1) && r1.i)
rule->t_flags[tidx] |= RULE_TFLAGS_MATCH;
else if (RULE_IS_GLOBAL(rule))
rule->ns->t_flags[tidx] |= NAMESPACE_TFLAGS_UNSATISFIED_GLOBAL;
#ifdef PROFILING_ENABLED
rule->clock_ticks += clock() - start;
start = clock();
#endif
break;
case OP_OBJ_LOAD:
identifier = *(char**)(ip + 1);
ip += sizeof(uint64_t);
r1.o = (YR_OBJECT*) yr_hash_table_lookup(
context->objects_table,
identifier,
NULL);
assert(r1.o != NULL);
push(r1);
break;
case OP_OBJ_FIELD:
identifier = *(char**)(ip + 1);
ip += sizeof(uint64_t);
pop(r1);
ensure_defined(r1);
r1.o = yr_object_lookup_field(r1.o, identifier);
assert(r1.o != NULL);
push(r1);
break;
case OP_OBJ_VALUE:
pop(r1);
ensure_defined(r1);
switch(r1.o->type)
{
case OBJECT_TYPE_INTEGER:
r1.i = ((YR_OBJECT_INTEGER*) r1.o)->value;
break;
case OBJECT_TYPE_FLOAT:
if (isnan(((YR_OBJECT_DOUBLE*) r1.o)->value))
r1.i = UNDEFINED;
else
r1.d = ((YR_OBJECT_DOUBLE*) r1.o)->value;
break;
case OBJECT_TYPE_STRING:
if (((YR_OBJECT_STRING*) r1.o)->value == NULL)
r1.i = UNDEFINED;
else
r1.p = ((YR_OBJECT_STRING*) r1.o)->value;
break;
default:
assert(FALSE);
}
push(r1);
break;
case OP_INDEX_ARRAY:
pop(r1); // index
pop(r2); // array
ensure_defined(r1);
ensure_defined(r2);
assert(r2.o->type == OBJECT_TYPE_ARRAY);
r1.o = yr_object_array_get_item(r2.o, 0, (int) r1.i);
if (r1.o == NULL)
r1.i = UNDEFINED;
push(r1);
break;
case OP_LOOKUP_DICT:
pop(r1); // key
pop(r2); // dictionary
ensure_defined(r1);
ensure_defined(r2);
assert(r2.o->type == OBJECT_TYPE_DICTIONARY);
r1.o = yr_object_dict_get_item(
r2.o, 0, r1.ss->c_string);
if (r1.o == NULL)
r1.i = UNDEFINED;
push(r1);
break;
case OP_CALL:
args_fmt = *(char**)(ip + 1);
ip += sizeof(uint64_t);
i = (int) strlen(args_fmt);
count = 0;
while (i > 0)
{
pop(r1);
if (is_undef(r1)) // count the number of undefined args
count++;
args[i - 1] = r1;
i--;
}
pop(r2);
ensure_defined(r2);
if (count > 0)
{
r1.i = UNDEFINED;
push(r1);
break;
}
function = (YR_OBJECT_FUNCTION*) r2.o;
result = ERROR_INTERNAL_FATAL_ERROR;
for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++)
{
if (function->prototypes[i].arguments_fmt == NULL)
break;
if (strcmp(function->prototypes[i].arguments_fmt, args_fmt) == 0)
{
result = function->prototypes[i].code(args, context, function);
break;
}
}
assert(i < MAX_OVERLOADED_FUNCTIONS);
if (result == ERROR_SUCCESS)
{
r1.o = function->return_obj;
push(r1);
}
else
{
stop = TRUE;
}
break;
case OP_FOUND:
pop(r1);
r1.i = r1.s->matches[tidx].tail != NULL ? 1 : 0;
push(r1);
break;
case OP_FOUND_AT:
pop(r2);
pop(r1);
if (is_undef(r1))
{
r1.i = 0;
push(r1);
break;
}
match = r2.s->matches[tidx].head;
r3.i = FALSE;
while (match != NULL)
{
if (r1.i == match->base + match->offset)
{
r3.i = TRUE;
break;
}
if (r1.i < match->base + match->offset)
break;
match = match->next;
}
push(r3);
break;
case OP_FOUND_IN:
pop(r3);
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
match = r3.s->matches[tidx].head;
r3.i = FALSE;
while (match != NULL && !r3.i)
{
if (match->base + match->offset >= r1.i &&
match->base + match->offset <= r2.i)
{
r3.i = TRUE;
}
if (match->base + match->offset > r2.i)
break;
match = match->next;
}
push(r3);
break;
case OP_COUNT:
pop(r1);
r1.i = r1.s->matches[tidx].count;
push(r1);
break;
case OP_OFFSET:
pop(r2);
pop(r1);
ensure_defined(r1);
match = r2.s->matches[tidx].head;
i = 1;
r3.i = UNDEFINED;
while (match != NULL && r3.i == UNDEFINED)
{
if (r1.i == i)
r3.i = match->base + match->offset;
i++;
match = match->next;
}
push(r3);
break;
case OP_LENGTH:
pop(r2);
pop(r1);
ensure_defined(r1);
match = r2.s->matches[tidx].head;
i = 1;
r3.i = UNDEFINED;
while (match != NULL && r3.i == UNDEFINED)
{
if (r1.i == i)
r3.i = match->match_length;
i++;
match = match->next;
}
push(r3);
break;
case OP_OF:
found = 0;
count = 0;
pop(r1);
while (!is_undef(r1))
{
if (r1.s->matches[tidx].tail != NULL)
found++;
count++;
pop(r1);
}
pop(r2);
if (is_undef(r2))
r1.i = found >= count ? 1 : 0;
else
r1.i = found >= r2.i ? 1 : 0;
push(r1);
break;
case OP_FILESIZE:
r1.i = context->file_size;
push(r1);
break;
case OP_ENTRYPOINT:
r1.i = context->entry_point;
push(r1);
break;
case OP_INT8:
pop(r1);
r1.i = read_int8_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT16:
pop(r1);
r1.i = read_int16_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT32:
pop(r1);
r1.i = read_int32_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT8:
pop(r1);
r1.i = read_uint8_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT16:
pop(r1);
r1.i = read_uint16_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT32:
pop(r1);
r1.i = read_uint32_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT8BE:
pop(r1);
r1.i = read_int8_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT16BE:
pop(r1);
r1.i = read_int16_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT32BE:
pop(r1);
r1.i = read_int32_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT8BE:
pop(r1);
r1.i = read_uint8_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT16BE:
pop(r1);
r1.i = read_uint16_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT32BE:
pop(r1);
r1.i = read_uint32_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_CONTAINS:
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
r1.i = memmem(r1.ss->c_string, r1.ss->length,
r2.ss->c_string, r2.ss->length) != NULL;
push(r1);
break;
case OP_IMPORT:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
result = yr_modules_load((char*) r1.p, context);
if (result != ERROR_SUCCESS)
stop = TRUE;
break;
case OP_MATCHES:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r1.ss->length == 0)
{
r1.i = FALSE;
push(r1);
break;
}
r1.i = yr_re_exec(
(uint8_t*) r2.re->code,
(uint8_t*) r1.ss->c_string,
r1.ss->length,
r2.re->flags | RE_FLAGS_SCAN,
NULL,
NULL) >= 0;
push(r1);
break;
case OP_INT_TO_DBL:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
r2 = stack[sp - r1.i];
if (is_undef(r2))
stack[sp - r1.i].i = UNDEFINED;
else
stack[sp - r1.i].d = (double) r2.i;
break;
case OP_STR_TO_BOOL:
pop(r1);
ensure_defined(r1);
r1.i = r1.ss->length > 0;
push(r1);
break;
case OP_INT_EQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i == r2.i;
push(r1);
break;
case OP_INT_NEQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i != r2.i;
push(r1);
break;
case OP_INT_LT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i < r2.i;
push(r1);
break;
case OP_INT_GT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i > r2.i;
push(r1);
break;
case OP_INT_LE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i <= r2.i;
push(r1);
break;
case OP_INT_GE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i >= r2.i;
push(r1);
break;
case OP_INT_ADD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i + r2.i;
push(r1);
break;
case OP_INT_SUB:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i - r2.i;
push(r1);
break;
case OP_INT_MUL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i * r2.i;
push(r1);
break;
case OP_INT_DIV:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r2.i != 0)
r1.i = r1.i / r2.i;
else
r1.i = UNDEFINED;
push(r1);
break;
case OP_INT_MINUS:
pop(r1);
ensure_defined(r1);
r1.i = -r1.i;
push(r1);
break;
case OP_DBL_LT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d < r2.d;
push(r1);
break;
case OP_DBL_GT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d > r2.d;
push(r1);
break;
case OP_DBL_LE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d <= r2.d;
push(r1);
break;
case OP_DBL_GE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d >= r2.d;
push(r1);
break;
case OP_DBL_EQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d == r2.d;
push(r1);
break;
case OP_DBL_NEQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d != r2.d;
push(r1);
break;
case OP_DBL_ADD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d + r2.d;
push(r1);
break;
case OP_DBL_SUB:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d - r2.d;
push(r1);
break;
case OP_DBL_MUL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d * r2.d;
push(r1);
break;
case OP_DBL_DIV:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d / r2.d;
push(r1);
break;
case OP_DBL_MINUS:
pop(r1);
ensure_defined(r1);
r1.d = -r1.d;
push(r1);
break;
case OP_STR_EQ:
case OP_STR_NEQ:
case OP_STR_LT:
case OP_STR_LE:
case OP_STR_GT:
case OP_STR_GE:
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
switch(*ip)
{
case OP_STR_EQ:
r1.i = (sized_string_cmp(r1.ss, r2.ss) == 0);
break;
case OP_STR_NEQ:
r1.i = (sized_string_cmp(r1.ss, r2.ss) != 0);
break;
case OP_STR_LT:
r1.i = (sized_string_cmp(r1.ss, r2.ss) < 0);
break;
case OP_STR_LE:
r1.i = (sized_string_cmp(r1.ss, r2.ss) <= 0);
break;
case OP_STR_GT:
r1.i = (sized_string_cmp(r1.ss, r2.ss) > 0);
break;
case OP_STR_GE:
r1.i = (sized_string_cmp(r1.ss, r2.ss) >= 0);
break;
}
push(r1);
break;
default:
assert(FALSE);
}
if (timeout > 0) // timeout == 0 means no timeout
{
if (++cycle == 10)
{
if (difftime(time(NULL), start_time) > timeout)
{
#ifdef PROFILING_ENABLED
assert(current_rule != NULL);
current_rule->clock_ticks += clock() - start;
#endif
result = ERROR_SCAN_TIMEOUT;
stop = TRUE;
}
cycle = 0;
}
}
ip++;
}
yr_modules_unload_all(context);
yr_free(stack);
return result;
}
Commit Message: Fix issue #646 (#648)
* Fix issue #646 and some edge cases with wide regexps using \b and \B
* Rename function IS_WORD_CHAR to _yr_re_is_word_char
CWE ID: CWE-125 | int yr_execute_code(
YR_RULES* rules,
YR_SCAN_CONTEXT* context,
int timeout,
time_t start_time)
{
int64_t mem[MEM_SIZE];
int32_t sp = 0;
uint8_t* ip = rules->code_start;
YR_VALUE args[MAX_FUNCTION_ARGS];
YR_VALUE *stack;
YR_VALUE r1;
YR_VALUE r2;
YR_VALUE r3;
#ifdef PROFILING_ENABLED
YR_RULE* current_rule = NULL;
#endif
YR_RULE* rule;
YR_MATCH* match;
YR_OBJECT_FUNCTION* function;
char* identifier;
char* args_fmt;
int i;
int found;
int count;
int result = ERROR_SUCCESS;
int stop = FALSE;
int cycle = 0;
int tidx = context->tidx;
int stack_size;
#ifdef PROFILING_ENABLED
clock_t start = clock();
#endif
yr_get_configuration(YR_CONFIG_STACK_SIZE, (void*) &stack_size);
stack = (YR_VALUE*) yr_malloc(stack_size * sizeof(YR_VALUE));
if (stack == NULL)
return ERROR_INSUFFICIENT_MEMORY;
while(!stop)
{
switch(*ip)
{
case OP_HALT:
assert(sp == 0); // When HALT is reached the stack should be empty.
stop = TRUE;
break;
case OP_PUSH:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
push(r1);
break;
case OP_POP:
pop(r1);
break;
case OP_CLEAR_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
mem[r1.i] = 0;
break;
case OP_ADD_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
if (!is_undef(r2))
mem[r1.i] += r2.i;
break;
case OP_INCR_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
mem[r1.i]++;
break;
case OP_PUSH_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
r1.i = mem[r1.i];
push(r1);
break;
case OP_POP_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
mem[r1.i] = r2.i;
break;
case OP_SWAPUNDEF:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
if (is_undef(r2))
{
r1.i = mem[r1.i];
push(r1);
}
else
{
push(r2);
}
break;
case OP_JNUNDEF:
pop(r1);
push(r1);
ip = jmp_if(!is_undef(r1), ip);
break;
case OP_JLE:
pop(r2);
pop(r1);
push(r1);
push(r2);
ip = jmp_if(r1.i <= r2.i, ip);
break;
case OP_JTRUE:
pop(r1);
push(r1);
ip = jmp_if(!is_undef(r1) && r1.i, ip);
break;
case OP_JFALSE:
pop(r1);
push(r1);
ip = jmp_if(is_undef(r1) || !r1.i, ip);
break;
case OP_AND:
pop(r2);
pop(r1);
if (is_undef(r1) || is_undef(r2))
r1.i = 0;
else
r1.i = r1.i && r2.i;
push(r1);
break;
case OP_OR:
pop(r2);
pop(r1);
if (is_undef(r1))
{
push(r2);
}
else if (is_undef(r2))
{
push(r1);
}
else
{
r1.i = r1.i || r2.i;
push(r1);
}
break;
case OP_NOT:
pop(r1);
if (is_undef(r1))
r1.i = UNDEFINED;
else
r1.i= !r1.i;
push(r1);
break;
case OP_MOD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r2.i != 0)
r1.i = r1.i % r2.i;
else
r1.i = UNDEFINED;
push(r1);
break;
case OP_SHR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i >> r2.i;
push(r1);
break;
case OP_SHL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i << r2.i;
push(r1);
break;
case OP_BITWISE_NOT:
pop(r1);
ensure_defined(r1);
r1.i = ~r1.i;
push(r1);
break;
case OP_BITWISE_AND:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i & r2.i;
push(r1);
break;
case OP_BITWISE_OR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i | r2.i;
push(r1);
break;
case OP_BITWISE_XOR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i ^ r2.i;
push(r1);
break;
case OP_PUSH_RULE:
rule = *(YR_RULE**)(ip + 1);
ip += sizeof(uint64_t);
r1.i = rule->t_flags[tidx] & RULE_TFLAGS_MATCH ? 1 : 0;
push(r1);
break;
case OP_INIT_RULE:
#ifdef PROFILING_ENABLED
current_rule = *(YR_RULE**)(ip + 1);
#endif
ip += sizeof(uint64_t);
break;
case OP_MATCH_RULE:
pop(r1);
rule = *(YR_RULE**)(ip + 1);
ip += sizeof(uint64_t);
if (!is_undef(r1) && r1.i)
rule->t_flags[tidx] |= RULE_TFLAGS_MATCH;
else if (RULE_IS_GLOBAL(rule))
rule->ns->t_flags[tidx] |= NAMESPACE_TFLAGS_UNSATISFIED_GLOBAL;
#ifdef PROFILING_ENABLED
rule->clock_ticks += clock() - start;
start = clock();
#endif
break;
case OP_OBJ_LOAD:
identifier = *(char**)(ip + 1);
ip += sizeof(uint64_t);
r1.o = (YR_OBJECT*) yr_hash_table_lookup(
context->objects_table,
identifier,
NULL);
assert(r1.o != NULL);
push(r1);
break;
case OP_OBJ_FIELD:
identifier = *(char**)(ip + 1);
ip += sizeof(uint64_t);
pop(r1);
ensure_defined(r1);
r1.o = yr_object_lookup_field(r1.o, identifier);
assert(r1.o != NULL);
push(r1);
break;
case OP_OBJ_VALUE:
pop(r1);
ensure_defined(r1);
switch(r1.o->type)
{
case OBJECT_TYPE_INTEGER:
r1.i = ((YR_OBJECT_INTEGER*) r1.o)->value;
break;
case OBJECT_TYPE_FLOAT:
if (isnan(((YR_OBJECT_DOUBLE*) r1.o)->value))
r1.i = UNDEFINED;
else
r1.d = ((YR_OBJECT_DOUBLE*) r1.o)->value;
break;
case OBJECT_TYPE_STRING:
if (((YR_OBJECT_STRING*) r1.o)->value == NULL)
r1.i = UNDEFINED;
else
r1.p = ((YR_OBJECT_STRING*) r1.o)->value;
break;
default:
assert(FALSE);
}
push(r1);
break;
case OP_INDEX_ARRAY:
pop(r1); // index
pop(r2); // array
ensure_defined(r1);
ensure_defined(r2);
assert(r2.o->type == OBJECT_TYPE_ARRAY);
r1.o = yr_object_array_get_item(r2.o, 0, (int) r1.i);
if (r1.o == NULL)
r1.i = UNDEFINED;
push(r1);
break;
case OP_LOOKUP_DICT:
pop(r1); // key
pop(r2); // dictionary
ensure_defined(r1);
ensure_defined(r2);
assert(r2.o->type == OBJECT_TYPE_DICTIONARY);
r1.o = yr_object_dict_get_item(
r2.o, 0, r1.ss->c_string);
if (r1.o == NULL)
r1.i = UNDEFINED;
push(r1);
break;
case OP_CALL:
args_fmt = *(char**)(ip + 1);
ip += sizeof(uint64_t);
i = (int) strlen(args_fmt);
count = 0;
while (i > 0)
{
pop(r1);
if (is_undef(r1)) // count the number of undefined args
count++;
args[i - 1] = r1;
i--;
}
pop(r2);
ensure_defined(r2);
if (count > 0)
{
r1.i = UNDEFINED;
push(r1);
break;
}
function = (YR_OBJECT_FUNCTION*) r2.o;
result = ERROR_INTERNAL_FATAL_ERROR;
for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++)
{
if (function->prototypes[i].arguments_fmt == NULL)
break;
if (strcmp(function->prototypes[i].arguments_fmt, args_fmt) == 0)
{
result = function->prototypes[i].code(args, context, function);
break;
}
}
assert(i < MAX_OVERLOADED_FUNCTIONS);
if (result == ERROR_SUCCESS)
{
r1.o = function->return_obj;
push(r1);
}
else
{
stop = TRUE;
}
break;
case OP_FOUND:
pop(r1);
r1.i = r1.s->matches[tidx].tail != NULL ? 1 : 0;
push(r1);
break;
case OP_FOUND_AT:
pop(r2);
pop(r1);
if (is_undef(r1))
{
r1.i = 0;
push(r1);
break;
}
match = r2.s->matches[tidx].head;
r3.i = FALSE;
while (match != NULL)
{
if (r1.i == match->base + match->offset)
{
r3.i = TRUE;
break;
}
if (r1.i < match->base + match->offset)
break;
match = match->next;
}
push(r3);
break;
case OP_FOUND_IN:
pop(r3);
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
match = r3.s->matches[tidx].head;
r3.i = FALSE;
while (match != NULL && !r3.i)
{
if (match->base + match->offset >= r1.i &&
match->base + match->offset <= r2.i)
{
r3.i = TRUE;
}
if (match->base + match->offset > r2.i)
break;
match = match->next;
}
push(r3);
break;
case OP_COUNT:
pop(r1);
r1.i = r1.s->matches[tidx].count;
push(r1);
break;
case OP_OFFSET:
pop(r2);
pop(r1);
ensure_defined(r1);
match = r2.s->matches[tidx].head;
i = 1;
r3.i = UNDEFINED;
while (match != NULL && r3.i == UNDEFINED)
{
if (r1.i == i)
r3.i = match->base + match->offset;
i++;
match = match->next;
}
push(r3);
break;
case OP_LENGTH:
pop(r2);
pop(r1);
ensure_defined(r1);
match = r2.s->matches[tidx].head;
i = 1;
r3.i = UNDEFINED;
while (match != NULL && r3.i == UNDEFINED)
{
if (r1.i == i)
r3.i = match->match_length;
i++;
match = match->next;
}
push(r3);
break;
case OP_OF:
found = 0;
count = 0;
pop(r1);
while (!is_undef(r1))
{
if (r1.s->matches[tidx].tail != NULL)
found++;
count++;
pop(r1);
}
pop(r2);
if (is_undef(r2))
r1.i = found >= count ? 1 : 0;
else
r1.i = found >= r2.i ? 1 : 0;
push(r1);
break;
case OP_FILESIZE:
r1.i = context->file_size;
push(r1);
break;
case OP_ENTRYPOINT:
r1.i = context->entry_point;
push(r1);
break;
case OP_INT8:
pop(r1);
r1.i = read_int8_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT16:
pop(r1);
r1.i = read_int16_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT32:
pop(r1);
r1.i = read_int32_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT8:
pop(r1);
r1.i = read_uint8_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT16:
pop(r1);
r1.i = read_uint16_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT32:
pop(r1);
r1.i = read_uint32_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT8BE:
pop(r1);
r1.i = read_int8_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT16BE:
pop(r1);
r1.i = read_int16_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT32BE:
pop(r1);
r1.i = read_int32_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT8BE:
pop(r1);
r1.i = read_uint8_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT16BE:
pop(r1);
r1.i = read_uint16_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT32BE:
pop(r1);
r1.i = read_uint32_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_CONTAINS:
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
r1.i = memmem(r1.ss->c_string, r1.ss->length,
r2.ss->c_string, r2.ss->length) != NULL;
push(r1);
break;
case OP_IMPORT:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
result = yr_modules_load((char*) r1.p, context);
if (result != ERROR_SUCCESS)
stop = TRUE;
break;
case OP_MATCHES:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r1.ss->length == 0)
{
r1.i = FALSE;
push(r1);
break;
}
r1.i = yr_re_exec(
(uint8_t*) r2.re->code,
(uint8_t*) r1.ss->c_string,
r1.ss->length,
0,
r2.re->flags | RE_FLAGS_SCAN,
NULL,
NULL) >= 0;
push(r1);
break;
case OP_INT_TO_DBL:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
r2 = stack[sp - r1.i];
if (is_undef(r2))
stack[sp - r1.i].i = UNDEFINED;
else
stack[sp - r1.i].d = (double) r2.i;
break;
case OP_STR_TO_BOOL:
pop(r1);
ensure_defined(r1);
r1.i = r1.ss->length > 0;
push(r1);
break;
case OP_INT_EQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i == r2.i;
push(r1);
break;
case OP_INT_NEQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i != r2.i;
push(r1);
break;
case OP_INT_LT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i < r2.i;
push(r1);
break;
case OP_INT_GT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i > r2.i;
push(r1);
break;
case OP_INT_LE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i <= r2.i;
push(r1);
break;
case OP_INT_GE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i >= r2.i;
push(r1);
break;
case OP_INT_ADD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i + r2.i;
push(r1);
break;
case OP_INT_SUB:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i - r2.i;
push(r1);
break;
case OP_INT_MUL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i * r2.i;
push(r1);
break;
case OP_INT_DIV:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r2.i != 0)
r1.i = r1.i / r2.i;
else
r1.i = UNDEFINED;
push(r1);
break;
case OP_INT_MINUS:
pop(r1);
ensure_defined(r1);
r1.i = -r1.i;
push(r1);
break;
case OP_DBL_LT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d < r2.d;
push(r1);
break;
case OP_DBL_GT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d > r2.d;
push(r1);
break;
case OP_DBL_LE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d <= r2.d;
push(r1);
break;
case OP_DBL_GE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d >= r2.d;
push(r1);
break;
case OP_DBL_EQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d == r2.d;
push(r1);
break;
case OP_DBL_NEQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d != r2.d;
push(r1);
break;
case OP_DBL_ADD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d + r2.d;
push(r1);
break;
case OP_DBL_SUB:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d - r2.d;
push(r1);
break;
case OP_DBL_MUL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d * r2.d;
push(r1);
break;
case OP_DBL_DIV:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d / r2.d;
push(r1);
break;
case OP_DBL_MINUS:
pop(r1);
ensure_defined(r1);
r1.d = -r1.d;
push(r1);
break;
case OP_STR_EQ:
case OP_STR_NEQ:
case OP_STR_LT:
case OP_STR_LE:
case OP_STR_GT:
case OP_STR_GE:
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
switch(*ip)
{
case OP_STR_EQ:
r1.i = (sized_string_cmp(r1.ss, r2.ss) == 0);
break;
case OP_STR_NEQ:
r1.i = (sized_string_cmp(r1.ss, r2.ss) != 0);
break;
case OP_STR_LT:
r1.i = (sized_string_cmp(r1.ss, r2.ss) < 0);
break;
case OP_STR_LE:
r1.i = (sized_string_cmp(r1.ss, r2.ss) <= 0);
break;
case OP_STR_GT:
r1.i = (sized_string_cmp(r1.ss, r2.ss) > 0);
break;
case OP_STR_GE:
r1.i = (sized_string_cmp(r1.ss, r2.ss) >= 0);
break;
}
push(r1);
break;
default:
assert(FALSE);
}
if (timeout > 0) // timeout == 0 means no timeout
{
if (++cycle == 10)
{
if (difftime(time(NULL), start_time) > timeout)
{
#ifdef PROFILING_ENABLED
assert(current_rule != NULL);
current_rule->clock_ticks += clock() - start;
#endif
result = ERROR_SCAN_TIMEOUT;
stop = TRUE;
}
cycle = 0;
}
}
ip++;
}
yr_modules_unload_all(context);
yr_free(stack);
return result;
}
| 168,201 |
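The commit ties this CWE-125 out-of-bounds read to \b and \B handling in wide regexps, and the only change visible in the interpreter is a new leading-offset argument passed to yr_re_exec. The underlying hazard is that a word-boundary test at position 0 must not look one byte before the start of the input. A minimal sketch of the guarded check, with assumed semantics rather than YARA's actual _yr_re_is_word_char:

#include <ctype.h>
#include <stddef.h>
#include <stdint.h>

static int is_word_char(uint8_t c)
{
  return isalnum(c) || c == '_';
}

static int at_word_boundary(const uint8_t *input, size_t len, size_t pos)
{
  int before = (pos > 0) ? is_word_char(input[pos - 1]) : 0;  /* guard: no input[-1] */
  int after  = (pos < len) ? is_word_char(input[pos]) : 0;
  return before != after;
}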
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: VOID ixheaacd_esbr_postradixcompute2(WORD32 *ptr_y, WORD32 *ptr_x,
const WORD32 *pdig_rev_tbl,
WORD32 npoints) {
WORD32 i, k;
WORD32 h2;
WORD32 x_0, x_1, x_2, x_3;
WORD32 x_4, x_5, x_6, x_7;
WORD32 x_8, x_9, x_a, x_b, x_c, x_d, x_e, x_f;
WORD32 n00, n10, n20, n30, n01, n11, n21, n31;
WORD32 n02, n12, n22, n32, n03, n13, n23, n33;
WORD32 n0, j0;
WORD32 *x2, *x0;
WORD32 *y0, *y1, *y2, *y3;
y0 = ptr_y;
y2 = ptr_y + (WORD32)npoints;
x0 = ptr_x;
x2 = ptr_x + (WORD32)(npoints >> 1);
y1 = y0 + (WORD32)(npoints >> 2);
y3 = y2 + (WORD32)(npoints >> 2);
j0 = 8;
n0 = npoints >> 1;
for (k = 0; k < 2; k++) {
    for (i = 0; i < npoints >> 1; i += 8) {
h2 = *pdig_rev_tbl++ >> 2;
x_0 = *x0++;
x_1 = *x0++;
x_2 = *x0++;
x_3 = *x0++;
x_4 = *x0++;
x_5 = *x0++;
x_6 = *x0++;
x_7 = *x0++;
n00 = x_0 + x_2;
n01 = x_1 + x_3;
n20 = x_0 - x_2;
n21 = x_1 - x_3;
n10 = x_4 + x_6;
n11 = x_5 + x_7;
n30 = x_4 - x_6;
n31 = x_5 - x_7;
y0[h2] = n00;
y0[h2 + 1] = n01;
y1[h2] = n10;
y1[h2 + 1] = n11;
y2[h2] = n20;
y2[h2 + 1] = n21;
y3[h2] = n30;
y3[h2 + 1] = n31;
x_8 = *x2++;
x_9 = *x2++;
x_a = *x2++;
x_b = *x2++;
x_c = *x2++;
x_d = *x2++;
x_e = *x2++;
x_f = *x2++;
n02 = x_8 + x_a;
n03 = x_9 + x_b;
n22 = x_8 - x_a;
n23 = x_9 - x_b;
n12 = x_c + x_e;
n13 = x_d + x_f;
n32 = x_c - x_e;
n33 = x_d - x_f;
y0[h2 + 2] = n02;
y0[h2 + 3] = n03;
y1[h2 + 2] = n12;
y1[h2 + 3] = n13;
y2[h2 + 2] = n22;
y2[h2 + 3] = n23;
y3[h2 + 2] = n32;
y3[h2 + 3] = n33;
}
x0 += (WORD32)npoints >> 1;
x2 += (WORD32)npoints >> 1;
}
}
Commit Message: Fix for stack corruption in esbr
Bug: 110769924
Test: poc from bug before/after
Change-Id: I99c6e89902064849ea1310c271064bdeccf7f20e
(cherry picked from commit 7e90d745c22695236437297cd8167a9312427a4a)
(cherry picked from commit 5464927f0c1fc721fa03d1c5be77b0b43dfffc50)
CWE ID: CWE-787 | VOID ixheaacd_esbr_postradixcompute2(WORD32 *ptr_y, WORD32 *ptr_x,
const WORD32 *pdig_rev_tbl,
WORD32 npoints) {
WORD32 i, k;
WORD32 h2;
WORD32 x_0, x_1, x_2, x_3;
WORD32 x_4, x_5, x_6, x_7;
WORD32 x_8, x_9, x_a, x_b, x_c, x_d, x_e, x_f;
WORD32 n0, j0;
WORD32 *x2, *x0;
WORD32 *y0, *y1, *y2, *y3;
y0 = ptr_y;
y2 = ptr_y + (WORD32)npoints;
x0 = ptr_x;
x2 = ptr_x + (WORD32)(npoints >> 1);
y1 = y0 + (WORD32)(npoints >> 2);
y3 = y2 + (WORD32)(npoints >> 2);
j0 = 8;
n0 = npoints >> 1;
for (k = 0; k < 2; k++) {
    for (i = 0; i < npoints >> 1; i += 8) {
h2 = *pdig_rev_tbl++ >> 2;
x_0 = *x0++;
x_1 = *x0++;
x_2 = *x0++;
x_3 = *x0++;
x_4 = *x0++;
x_5 = *x0++;
x_6 = *x0++;
x_7 = *x0++;
y0[h2] = ixheaacd_add32_sat(x_0, x_2);
y0[h2 + 1] = ixheaacd_add32_sat(x_1, x_3);
y1[h2] = ixheaacd_add32_sat(x_4, x_6);
y1[h2 + 1] = ixheaacd_add32_sat(x_5, x_7);
y2[h2] = ixheaacd_sub32_sat(x_0, x_2);
y2[h2 + 1] = ixheaacd_sub32_sat(x_1, x_3);
y3[h2] = ixheaacd_sub32_sat(x_4, x_6);
y3[h2 + 1] = ixheaacd_sub32_sat(x_5, x_7);
x_8 = *x2++;
x_9 = *x2++;
x_a = *x2++;
x_b = *x2++;
x_c = *x2++;
x_d = *x2++;
x_e = *x2++;
x_f = *x2++;
y0[h2 + 2] = ixheaacd_add32_sat(x_8, x_a);
y0[h2 + 3] = ixheaacd_add32_sat(x_9, x_b);
y1[h2 + 2] = ixheaacd_add32_sat(x_c, x_e);
y1[h2 + 3] = ixheaacd_add32_sat(x_d, x_f);
y2[h2 + 2] = ixheaacd_sub32_sat(x_8, x_a);
y2[h2 + 3] = ixheaacd_sub32_sat(x_9, x_b);
y3[h2 + 2] = ixheaacd_sub32_sat(x_c, x_e);
y3[h2 + 3] = ixheaacd_sub32_sat(x_d, x_f);
}
x0 += (WORD32)npoints >> 1;
x2 += (WORD32)npoints >> 1;
}
}
| 174,087 |
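The fixed routine replaces every plain add and subtract with ixheaacd_add32_sat and ixheaacd_sub32_sat, so intermediate overflow clamps instead of wrapping and corrupting neighbouring state (the CWE-787 write). A sketch of 32-bit saturating addition with the usual semantics, assumed to match the helper, whose body is not shown in this row:

#include <stdint.h>

/* Widen to 64 bits, then clamp to the int32_t range instead of wrapping. */
static int32_t add32_sat(int32_t a, int32_t b)
{
  int64_t sum = (int64_t)a + (int64_t)b;
  if (sum > INT32_MAX) return INT32_MAX;
  if (sum < INT32_MIN) return INT32_MIN;
  return (int32_t)sum;
}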
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool DoResolveRelativeHost(const char* base_url,
const url_parse::Parsed& base_parsed,
const CHAR* relative_url,
const url_parse::Component& relative_component,
CharsetConverter* query_converter,
CanonOutput* output,
url_parse::Parsed* out_parsed) {
url_parse::Parsed relative_parsed; // Everything but the scheme is valid.
url_parse::ParseAfterScheme(&relative_url[relative_component.begin],
relative_component.len, relative_component.begin,
&relative_parsed);
Replacements<CHAR> replacements;
replacements.SetUsername(relative_url, relative_parsed.username);
replacements.SetPassword(relative_url, relative_parsed.password);
replacements.SetHost(relative_url, relative_parsed.host);
replacements.SetPort(relative_url, relative_parsed.port);
replacements.SetPath(relative_url, relative_parsed.path);
replacements.SetQuery(relative_url, relative_parsed.query);
replacements.SetRef(relative_url, relative_parsed.ref);
return ReplaceStandardURL(base_url, base_parsed, replacements,
query_converter, output, out_parsed);
}
Commit Message: Fix OOB read when parsing protocol-relative URLs
BUG=285742
Review URL: https://chromiumcodereview.appspot.com/23902014
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@223735 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | bool DoResolveRelativeHost(const char* base_url,
const url_parse::Parsed& base_parsed,
const CHAR* relative_url,
const url_parse::Component& relative_component,
CharsetConverter* query_converter,
CanonOutput* output,
url_parse::Parsed* out_parsed) {
url_parse::Parsed relative_parsed; // Everything but the scheme is valid.
url_parse::ParseAfterScheme(relative_url, relative_component.end(),
relative_component.begin, &relative_parsed);
Replacements<CHAR> replacements;
replacements.SetUsername(relative_url, relative_parsed.username);
replacements.SetPassword(relative_url, relative_parsed.password);
replacements.SetHost(relative_url, relative_parsed.host);
replacements.SetPort(relative_url, relative_parsed.port);
replacements.SetPath(relative_url, relative_parsed.path);
replacements.SetQuery(relative_url, relative_parsed.query);
replacements.SetRef(relative_url, relative_parsed.ref);
return ReplaceStandardURL(base_url, base_parsed, replacements,
query_converter, output, out_parsed);
}
| 171,192 |
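The whole fix is in the first two arguments to ParseAfterScheme: the vulnerable call hands it a pointer already advanced by relative_component.begin together with that same begin offset, so the offset is applied twice and parsing reads past the buffer; the fixed call passes the unadvanced buffer with an end bound. The hazard in miniature, with illustrative signatures rather than the url_parse API:

void parse_after(const char *spec, int end, int begin);  /* reads spec[begin .. end) */

void resolve(const char *url, int begin, int len)
{
  /* vulnerable shape: pointer pre-advanced by begin, and begin applied
     again inside the callee, so it reads from url[2*begin] onward (OOB):
     parse_after(&url[begin], len, begin); */

  /* fixed shape: whole buffer, offset applied exactly once */
  parse_after(url, begin + len, begin);
}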
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void onEndTest(void* self)
{
ASSERT(isMainThread());
webkit_support::QuitMessageLoop();
CCLayerTreeHostTest* test = static_cast<CCLayerTreeHostTest*>(self);
ASSERT(test);
test->m_layerTreeHost.clear();
}
Commit Message: [chromium] Fix shutdown race when posting main thread task to CCThreadProxy and enable tests
https://bugs.webkit.org/show_bug.cgi?id=70161
Reviewed by David Levin.
Source/WebCore:
Adds a weak pointer mechanism to cancel main thread tasks posted to CCThreadProxy instances from the compositor
thread. Previously there was a race condition where main thread tasks could run even after the CCThreadProxy was
destroyed.
This race does not exist in the other direction because when tearing down a CCThreadProxy we first post a quit
task to the compositor thread and then suspend execution of the main thread until all compositor tasks for the
CCThreadProxy have been drained.
Covered by the now-enabled CCLayerTreeHostTest* unit tests.
* WebCore.gypi:
* platform/graphics/chromium/cc/CCScopedMainThreadProxy.h: Added.
(WebCore::CCScopedMainThreadProxy::create):
(WebCore::CCScopedMainThreadProxy::postTask):
(WebCore::CCScopedMainThreadProxy::shutdown):
(WebCore::CCScopedMainThreadProxy::CCScopedMainThreadProxy):
(WebCore::CCScopedMainThreadProxy::runTaskIfNotShutdown):
* platform/graphics/chromium/cc/CCThreadProxy.cpp:
(WebCore::CCThreadProxy::CCThreadProxy):
(WebCore::CCThreadProxy::~CCThreadProxy):
(WebCore::CCThreadProxy::createBeginFrameAndCommitTaskOnCCThread):
* platform/graphics/chromium/cc/CCThreadProxy.h:
Source/WebKit/chromium:
Enables the CCLayerTreeHostTest* tests by default. Most tests are run twice in a single thread and multiple
thread configuration. Some tests run only in the multiple thread configuration if they depend on the compositor
thread scheduling draws by itself.
* tests/CCLayerTreeHostTest.cpp:
(::CCLayerTreeHostTest::timeout):
(::CCLayerTreeHostTest::clearTimeout):
(::CCLayerTreeHostTest::CCLayerTreeHostTest):
(::CCLayerTreeHostTest::onEndTest):
(::CCLayerTreeHostTest::TimeoutTask::TimeoutTask):
(::CCLayerTreeHostTest::TimeoutTask::clearTest):
(::CCLayerTreeHostTest::TimeoutTask::~TimeoutTask):
(::CCLayerTreeHostTest::TimeoutTask::Run):
(::CCLayerTreeHostTest::runTest):
(::CCLayerTreeHostTest::doBeginTest):
(::CCLayerTreeHostTestThreadOnly::runTest):
(::CCLayerTreeHostTestSetNeedsRedraw::commitCompleteOnCCThread):
git-svn-id: svn://svn.chromium.org/blink/trunk@97784 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-119 | static void onEndTest(void* self)
{
ASSERT(isMainThread());
webkit_support::QuitMessageLoop();
webkit_support::RunAllPendingMessages();
CCLayerTreeHostTest* test = static_cast<CCLayerTreeHostTest*>(self);
ASSERT(test);
test->m_layerTreeHost.clear();
}
| 170,294 |
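Two things changed in this row: the test now drains pending messages before clearing m_layerTreeHost, and, per the commit message, CCScopedMainThreadProxy wraps each posted task in runTaskIfNotShutdown so main-thread tasks become no-ops once teardown begins. A bare-bones sketch of that guard, assuming pthreads and simplified types (the real class also reference-counts the proxy so the flag outlives the target):

#include <pthread.h>

/* Shared flag flipped at teardown, checked before a queued task
   touches the possibly-destroyed target. */
typedef struct {
  pthread_mutex_t lock;
  int shut_down;
} scoped_proxy;

void run_task_if_not_shutdown(scoped_proxy *p, void (*task)(void *), void *self)
{
  pthread_mutex_lock(&p->lock);
  if (!p->shut_down)
    task(self);              /* target guaranteed still alive */
  pthread_mutex_unlock(&p->lock);
}

void proxy_shutdown(scoped_proxy *p)
{
  pthread_mutex_lock(&p->lock);
  p->shut_down = 1;          /* later tasks become no-ops */
  pthread_mutex_unlock(&p->lock);
}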
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: gamma_image_validate(gamma_display *dp, png_const_structp pp,
png_infop pi)
{
/* Get some constants derived from the input and output file formats: */
PNG_CONST png_store* PNG_CONST ps = dp->this.ps;
PNG_CONST png_byte in_ct = dp->this.colour_type;
PNG_CONST png_byte in_bd = dp->this.bit_depth;
PNG_CONST png_uint_32 w = dp->this.w;
PNG_CONST png_uint_32 h = dp->this.h;
PNG_CONST size_t cbRow = dp->this.cbRow;
PNG_CONST png_byte out_ct = png_get_color_type(pp, pi);
PNG_CONST png_byte out_bd = png_get_bit_depth(pp, pi);
/* There are three sources of error, firstly the quantization in the
* file encoding, determined by sbit and/or the file depth, secondly
* the output (screen) gamma and thirdly the output file encoding.
*
* Since this API receives the screen and file gamma in double
* precision it is possible to calculate an exact answer given an input
* pixel value. Therefore we assume that the *input* value is exact -
* sample/maxsample - calculate the corresponding gamma corrected
* output to the limits of double precision arithmetic and compare with
* what libpng returns.
*
* Since the library must quantize the output to 8 or 16 bits there is
* a fundamental limit on the accuracy of the output of +/-.5 - this
* quantization limit is included in addition to the other limits
* specified by the paramaters to the API. (Effectively, add .5
* everywhere.)
*
* The behavior of the 'sbit' paramter is defined by section 12.5
* (sample depth scaling) of the PNG spec. That section forces the
* decoder to assume that the PNG values have been scaled if sBIT is
* present:
*
* png-sample = floor( input-sample * (max-out/max-in) + .5);
*
* This means that only a subset of the possible PNG values should
* appear in the input. However, the spec allows the encoder to use a
* variety of approximations to the above and doesn't require any
* restriction of the values produced.
*
* Nevertheless the spec requires that the upper 'sBIT' bits of the
* value stored in a PNG file be the original sample bits.
* Consequently the code below simply scales the top sbit bits by
* (1<<sbit)-1 to obtain an original sample value.
*
* Because there is limited precision in the input it is arguable that
* an acceptable result is any valid result from input-.5 to input+.5.
* The basic tests below do not do this, however if 'use_input_precision'
* is set a subsequent test is performed above.
*/
PNG_CONST unsigned int samples_per_pixel = (out_ct & 2U) ? 3U : 1U;
int processing;
png_uint_32 y;
PNG_CONST store_palette_entry *in_palette = dp->this.palette;
PNG_CONST int in_is_transparent = dp->this.is_transparent;
int out_npalette = -1;
int out_is_transparent = 0; /* Just refers to the palette case */
store_palette out_palette;
validate_info vi;
/* Check for row overwrite errors */
store_image_check(dp->this.ps, pp, 0);
/* Supply the input and output sample depths here - 8 for an indexed image,
* otherwise the bit depth.
*/
init_validate_info(&vi, dp, pp, in_ct==3?8:in_bd, out_ct==3?8:out_bd);
processing = (vi.gamma_correction > 0 && !dp->threshold_test)
|| in_bd != out_bd || in_ct != out_ct || vi.do_background;
/* TODO: FIX THIS: MAJOR BUG! If the transformations all happen inside
* the palette there is no way of finding out, because libpng fails to
* update the palette on png_read_update_info. Indeed, libpng doesn't
* even do the required work until much later, when it doesn't have any
* info pointer. Oops. For the moment 'processing' is turned off if
* out_ct is palette.
*/
if (in_ct == 3 && out_ct == 3)
processing = 0;
if (processing && out_ct == 3)
out_is_transparent = read_palette(out_palette, &out_npalette, pp, pi);
for (y=0; y<h; ++y)
{
png_const_bytep pRow = store_image_row(ps, pp, 0, y);
png_byte std[STANDARD_ROWMAX];
transform_row(pp, std, in_ct, in_bd, y);
if (processing)
{
unsigned int x;
for (x=0; x<w; ++x)
{
double alpha = 1; /* serves as a flag value */
/* Record the palette index for index images. */
PNG_CONST unsigned int in_index =
in_ct == 3 ? sample(std, 3, in_bd, x, 0) : 256;
PNG_CONST unsigned int out_index =
out_ct == 3 ? sample(std, 3, out_bd, x, 0) : 256;
/* Handle input alpha - png_set_background will cause the output
* alpha to disappear so there is nothing to check.
*/
if ((in_ct & PNG_COLOR_MASK_ALPHA) != 0 || (in_ct == 3 &&
in_is_transparent))
{
PNG_CONST unsigned int input_alpha = in_ct == 3 ?
dp->this.palette[in_index].alpha :
sample(std, in_ct, in_bd, x, samples_per_pixel);
unsigned int output_alpha = 65536 /* as a flag value */;
if (out_ct == 3)
{
if (out_is_transparent)
output_alpha = out_palette[out_index].alpha;
}
else if ((out_ct & PNG_COLOR_MASK_ALPHA) != 0)
output_alpha = sample(pRow, out_ct, out_bd, x,
samples_per_pixel);
if (output_alpha != 65536)
alpha = gamma_component_validate("alpha", &vi, input_alpha,
output_alpha, -1/*alpha*/, 0/*background*/);
else /* no alpha in output */
{
/* This is a copy of the calculation of 'i' above in order to
* have the alpha value to use in the background calculation.
*/
alpha = input_alpha >> vi.isbit_shift;
alpha /= vi.sbit_max;
}
}
/* Handle grayscale or RGB components. */
if ((in_ct & PNG_COLOR_MASK_COLOR) == 0) /* grayscale */
(void)gamma_component_validate("gray", &vi,
sample(std, in_ct, in_bd, x, 0),
sample(pRow, out_ct, out_bd, x, 0), alpha/*component*/,
vi.background_red);
else /* RGB or palette */
{
(void)gamma_component_validate("red", &vi,
in_ct == 3 ? in_palette[in_index].red :
sample(std, in_ct, in_bd, x, 0),
out_ct == 3 ? out_palette[out_index].red :
sample(pRow, out_ct, out_bd, x, 0),
alpha/*component*/, vi.background_red);
(void)gamma_component_validate("green", &vi,
in_ct == 3 ? in_palette[in_index].green :
sample(std, in_ct, in_bd, x, 1),
out_ct == 3 ? out_palette[out_index].green :
sample(pRow, out_ct, out_bd, x, 1),
alpha/*component*/, vi.background_green);
(void)gamma_component_validate("blue", &vi,
in_ct == 3 ? in_palette[in_index].blue :
sample(std, in_ct, in_bd, x, 2),
out_ct == 3 ? out_palette[out_index].blue :
sample(pRow, out_ct, out_bd, x, 2),
alpha/*component*/, vi.background_blue);
}
}
}
else if (memcmp(std, pRow, cbRow) != 0)
{
char msg[64];
/* No transform is expected on the threshold tests. */
sprintf(msg, "gamma: below threshold row %lu changed",
(unsigned long)y);
png_error(pp, msg);
}
} /* row (y) loop */
dp->this.ps->validated = 1;
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | static void gamma_image_validate(gamma_display *dp, png_const_structp pp,
png_infop pi)
{
/* Get some constants derived from the input and output file formats: */
const png_store* const ps = dp->this.ps;
const png_byte in_ct = dp->this.colour_type;
const png_byte in_bd = dp->this.bit_depth;
const png_uint_32 w = dp->this.w;
const png_uint_32 h = dp->this.h;
const size_t cbRow = dp->this.cbRow;
const png_byte out_ct = png_get_color_type(pp, pi);
const png_byte out_bd = png_get_bit_depth(pp, pi);
/* There are three sources of error, firstly the quantization in the
* file encoding, determined by sbit and/or the file depth, secondly
* the output (screen) gamma and thirdly the output file encoding.
*
* Since this API receives the screen and file gamma in double
* precision it is possible to calculate an exact answer given an input
* pixel value. Therefore we assume that the *input* value is exact -
* sample/maxsample - calculate the corresponding gamma corrected
* output to the limits of double precision arithmetic and compare with
* what libpng returns.
*
* Since the library must quantize the output to 8 or 16 bits there is
* a fundamental limit on the accuracy of the output of +/-.5 - this
* quantization limit is included in addition to the other limits
* specified by the parameters to the API. (Effectively, add .5
* everywhere.)
*
* The behavior of the 'sbit' parameter is defined by section 12.5
* (sample depth scaling) of the PNG spec. That section forces the
* decoder to assume that the PNG values have been scaled if sBIT is
* present:
*
* png-sample = floor( input-sample * (max-out/max-in) + .5);
*
* This means that only a subset of the possible PNG values should
* appear in the input. However, the spec allows the encoder to use a
* variety of approximations to the above and doesn't require any
* restriction of the values produced.
*
* Nevertheless the spec requires that the upper 'sBIT' bits of the
* value stored in a PNG file be the original sample bits.
* Consequently the code below simply scales the top sbit bits by
* (1<<sbit)-1 to obtain an original sample value.
*
* Because there is limited precision in the input it is arguable that
* an acceptable result is any valid result from input-.5 to input+.5.
* The basic tests below do not do this, however if 'use_input_precision'
* is set a subsequent test is performed above.
*/
const unsigned int samples_per_pixel = (out_ct & 2U) ? 3U : 1U;
int processing;
png_uint_32 y;
const store_palette_entry *in_palette = dp->this.palette;
const int in_is_transparent = dp->this.is_transparent;
int process_tRNS;
int out_npalette = -1;
int out_is_transparent = 0; /* Just refers to the palette case */
store_palette out_palette;
validate_info vi;
/* Check for row overwrite errors */
store_image_check(dp->this.ps, pp, 0);
/* Supply the input and output sample depths here - 8 for an indexed image,
* otherwise the bit depth.
*/
init_validate_info(&vi, dp, pp, in_ct==3?8:in_bd, out_ct==3?8:out_bd);
processing = (vi.gamma_correction > 0 && !dp->threshold_test)
|| in_bd != out_bd || in_ct != out_ct || vi.do_background;
process_tRNS = dp->this.has_tRNS && vi.do_background;
/* TODO: FIX THIS: MAJOR BUG! If the transformations all happen inside
* the palette there is no way of finding out, because libpng fails to
* update the palette on png_read_update_info. Indeed, libpng doesn't
* even do the required work until much later, when it doesn't have any
* info pointer. Oops. For the moment 'processing' is turned off if
* out_ct is palette.
*/
if (in_ct == 3 && out_ct == 3)
processing = 0;
if (processing && out_ct == 3)
out_is_transparent = read_palette(out_palette, &out_npalette, pp, pi);
for (y=0; y<h; ++y)
{
png_const_bytep pRow = store_image_row(ps, pp, 0, y);
png_byte std[STANDARD_ROWMAX];
transform_row(pp, std, in_ct, in_bd, y);
if (processing)
{
unsigned int x;
for (x=0; x<w; ++x)
{
double alpha = 1; /* serves as a flag value */
/* Record the palette index for index images. */
const unsigned int in_index =
in_ct == 3 ? sample(std, 3, in_bd, x, 0, 0, 0) : 256;
const unsigned int out_index =
out_ct == 3 ? sample(std, 3, out_bd, x, 0, 0, 0) : 256;
/* Handle input alpha - png_set_background will cause the output
* alpha to disappear so there is nothing to check.
*/
if ((in_ct & PNG_COLOR_MASK_ALPHA) != 0 ||
(in_ct == 3 && in_is_transparent))
{
const unsigned int input_alpha = in_ct == 3 ?
dp->this.palette[in_index].alpha :
sample(std, in_ct, in_bd, x, samples_per_pixel, 0, 0);
unsigned int output_alpha = 65536 /* as a flag value */;
if (out_ct == 3)
{
if (out_is_transparent)
output_alpha = out_palette[out_index].alpha;
}
else if ((out_ct & PNG_COLOR_MASK_ALPHA) != 0)
output_alpha = sample(pRow, out_ct, out_bd, x,
samples_per_pixel, 0, 0);
if (output_alpha != 65536)
alpha = gamma_component_validate("alpha", &vi, input_alpha,
output_alpha, -1/*alpha*/, 0/*background*/);
else /* no alpha in output */
{
/* This is a copy of the calculation of 'i' above in order to
* have the alpha value to use in the background calculation.
*/
alpha = input_alpha >> vi.isbit_shift;
alpha /= vi.sbit_max;
}
}
else if (process_tRNS)
{
/* alpha needs to be set appropriately for this pixel, it is
* currently 1 and needs to be 0 for an input pixel which matches
* the values in tRNS.
*/
switch (in_ct)
{
case 0: /* gray */
if (sample(std, in_ct, in_bd, x, 0, 0, 0) ==
dp->this.transparent.red)
alpha = 0;
break;
case 2: /* RGB */
if (sample(std, in_ct, in_bd, x, 0, 0, 0) ==
dp->this.transparent.red &&
sample(std, in_ct, in_bd, x, 1, 0, 0) ==
dp->this.transparent.green &&
sample(std, in_ct, in_bd, x, 2, 0, 0) ==
dp->this.transparent.blue)
alpha = 0;
break;
default:
break;
}
}
/* Handle grayscale or RGB components. */
if ((in_ct & PNG_COLOR_MASK_COLOR) == 0) /* grayscale */
(void)gamma_component_validate("gray", &vi,
sample(std, in_ct, in_bd, x, 0, 0, 0),
sample(pRow, out_ct, out_bd, x, 0, 0, 0),
alpha/*component*/, vi.background_red);
else /* RGB or palette */
{
(void)gamma_component_validate("red", &vi,
in_ct == 3 ? in_palette[in_index].red :
sample(std, in_ct, in_bd, x, 0, 0, 0),
out_ct == 3 ? out_palette[out_index].red :
sample(pRow, out_ct, out_bd, x, 0, 0, 0),
alpha/*component*/, vi.background_red);
(void)gamma_component_validate("green", &vi,
in_ct == 3 ? in_palette[in_index].green :
sample(std, in_ct, in_bd, x, 1, 0, 0),
out_ct == 3 ? out_palette[out_index].green :
sample(pRow, out_ct, out_bd, x, 1, 0, 0),
alpha/*component*/, vi.background_green);
(void)gamma_component_validate("blue", &vi,
in_ct == 3 ? in_palette[in_index].blue :
sample(std, in_ct, in_bd, x, 2, 0, 0),
out_ct == 3 ? out_palette[out_index].blue :
sample(pRow, out_ct, out_bd, x, 2, 0, 0),
alpha/*component*/, vi.background_blue);
}
}
}
else if (memcmp(std, pRow, cbRow) != 0)
{
char msg[64];
/* No transform is expected on the threshold tests. */
sprintf(msg, "gamma: below threshold row %lu changed",
(unsigned long)y);
png_error(pp, msg);
}
} /* row (y) loop */
dp->this.ps->validated = 1;
}
| 173,612 |
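
A concrete reading of the sBIT scaling rule quoted in the pngvalid comments above, as a minimal C sketch (helper names are hypothetical, not libpng API; assumes max_in == (1<<sbit)-1 and max_out == (1<<file_depth)-1):

#include <math.h>

/* encoder side, PNG spec section 12.5:
 *   png-sample = floor( input-sample * (max-out/max-in) + .5 ) */
static unsigned int scale_to_file_depth(unsigned int sample,
    unsigned int max_in, unsigned int max_out)
{
    return (unsigned int)floor(sample * ((double)max_out / max_in) + .5);
}

/* decoder side, as the validation code above does: the top 'sbit'
 * bits of the stored value are the original sample bits, so shift
 * them back down (the test then normalizes by (1<<sbit)-1) */
static unsigned int recover_original(unsigned int png_sample,
    unsigned int file_depth, unsigned int sbit)
{
    return png_sample >> (file_depth - sbit);
}
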
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int lzxd_decompress(struct lzxd_stream *lzx, off_t out_bytes) {
/* bitstream and huffman reading variables */
register unsigned int bit_buffer;
register int bits_left, i=0;
unsigned char *i_ptr, *i_end;
register unsigned short sym;
int match_length, length_footer, extra, verbatim_bits, bytes_todo;
int this_run, main_element, aligned_bits, j;
unsigned char *window, *runsrc, *rundest, buf[12];
unsigned int frame_size=0, end_frame, match_offset, window_posn;
unsigned int R0, R1, R2;
/* easy answers */
if (!lzx || (out_bytes < 0)) return MSPACK_ERR_ARGS;
if (lzx->error) return lzx->error;
/* flush out any stored-up bytes before we begin */
i = lzx->o_end - lzx->o_ptr;
if ((off_t) i > out_bytes) i = (int) out_bytes;
if (i) {
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
}
if (out_bytes == 0) return MSPACK_ERR_OK;
/* restore local state */
RESTORE_BITS;
window = lzx->window;
window_posn = lzx->window_posn;
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
end_frame = (unsigned int)((lzx->offset + out_bytes) / LZX_FRAME_SIZE) + 1;
while (lzx->frame < end_frame) {
/* have we reached the reset interval? (if there is one?) */
if (lzx->reset_interval && ((lzx->frame % lzx->reset_interval) == 0)) {
if (lzx->block_remaining) {
D(("%d bytes remaining at reset interval", lzx->block_remaining))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-read the intel header and reset the huffman lengths */
lzxd_reset_state(lzx);
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
}
/* LZX DELTA format has chunk_size, not present in LZX format */
if (lzx->is_delta) {
ENSURE_BITS(16);
REMOVE_BITS(16);
}
/* read header if necessary */
if (!lzx->header_read) {
/* read 1 bit. if bit=0, intel filesize = 0.
* if bit=1, read intel filesize (32 bits) */
j = 0; READ_BITS(i, 1); if (i) { READ_BITS(i, 16); READ_BITS(j, 16); }
lzx->intel_filesize = (i << 16) | j;
lzx->header_read = 1;
}
/* calculate size of frame: all frames are 32k except the final frame
* which is 32kb or less. this can only be calculated when lzx->length
* has been filled in. */
frame_size = LZX_FRAME_SIZE;
if (lzx->length && (lzx->length - lzx->offset) < (off_t)frame_size) {
frame_size = lzx->length - lzx->offset;
}
/* decode until one more frame is available */
bytes_todo = lzx->frame_posn + frame_size - window_posn;
while (bytes_todo > 0) {
/* initialise new block, if one is needed */
if (lzx->block_remaining == 0) {
/* realign if previous block was an odd-sized UNCOMPRESSED block */
if ((lzx->block_type == LZX_BLOCKTYPE_UNCOMPRESSED) &&
(lzx->block_length & 1))
{
READ_IF_NEEDED;
i_ptr++;
}
/* read block type (3 bits) and block length (24 bits) */
READ_BITS(lzx->block_type, 3);
READ_BITS(i, 16); READ_BITS(j, 8);
lzx->block_remaining = lzx->block_length = (i << 8) | j;
/*D(("new block t%d len %u", lzx->block_type, lzx->block_length))*/
/* read individual block headers */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_ALIGNED:
/* read lengths of and build aligned huffman decoding tree */
for (i = 0; i < 8; i++) { READ_BITS(j, 3); lzx->ALIGNED_len[i] = j; }
BUILD_TABLE(ALIGNED);
/* no break -- rest of aligned header is same as verbatim */
case LZX_BLOCKTYPE_VERBATIM:
/* read lengths of and build main huffman decoding tree */
READ_LENGTHS(MAINTREE, 0, 256);
READ_LENGTHS(MAINTREE, 256, LZX_NUM_CHARS + lzx->num_offsets);
BUILD_TABLE(MAINTREE);
/* if the literal 0xE8 is anywhere in the block... */
if (lzx->MAINTREE_len[0xE8] != 0) lzx->intel_started = 1;
/* read lengths of and build lengths huffman decoding tree */
READ_LENGTHS(LENGTH, 0, LZX_NUM_SECONDARY_LENGTHS);
BUILD_TABLE_MAYBE_EMPTY(LENGTH);
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* because we can't assume otherwise */
lzx->intel_started = 1;
/* read 1-16 (not 0-15) bits to align to bytes */
ENSURE_BITS(16);
if (bits_left > 16) i_ptr -= 2;
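        /* NOTE: when bits_left > 16 this rewind can move i_ptr one
         * byte before the start of the input buffer if an odd-sized
         * data block came just before this header -- the 1-byte
         * underread named in the commit message below */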
bits_left = 0; bit_buffer = 0;
/* read 12 bytes of stored R0 / R1 / R2 values */
for (rundest = &buf[0], i = 0; i < 12; i++) {
READ_IF_NEEDED;
*rundest++ = *i_ptr++;
}
R0 = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
R1 = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24);
R2 = buf[8] | (buf[9] << 8) | (buf[10] << 16) | (buf[11] << 24);
break;
default:
D(("bad block type"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
}
/* decode more of the block:
* run = min(what's available, what's needed) */
this_run = lzx->block_remaining;
if (this_run > bytes_todo) this_run = bytes_todo;
/* assume we decode exactly this_run bytes, for now */
bytes_todo -= this_run;
lzx->block_remaining -= this_run;
/* decode at least this_run bytes */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_VERBATIM:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1=R0; R0 = match_offset; break;
case 2: match_offset = R2; R2=R0; R0 = match_offset; break;
case 3: match_offset = 1; R2=R1; R1=R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
READ_BITS(verbatim_bits, extra);
match_offset = position_base[match_offset] - 2 + verbatim_bits;
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_ALIGNED:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1 = R0; R0 = match_offset; break;
case 2: match_offset = R2; R2 = R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
match_offset = position_base[match_offset] - 2;
if (extra > 3) {
/* verbatim and aligned bits */
extra -= 3;
READ_BITS(verbatim_bits, extra);
match_offset += (verbatim_bits << 3);
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra == 3) {
/* aligned bits only */
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra > 0) { /* extra==1, extra==2 */
/* verbatim bits only */
READ_BITS(verbatim_bits, extra);
match_offset += verbatim_bits;
}
else /* extra == 0 */ {
/* ??? not defined in LZX specification! */
match_offset = 1;
}
/* update repeated offset LRU queue */
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* as this_run is limited not to wrap a frame, this also means it
* won't wrap the window (as the window is a multiple of 32k) */
rundest = &window[window_posn];
window_posn += this_run;
while (this_run > 0) {
if ((i = i_end - i_ptr) == 0) {
READ_IF_NEEDED;
}
else {
if (i > this_run) i = this_run;
lzx->sys->copy(i_ptr, rundest, (size_t) i);
rundest += i;
i_ptr += i;
this_run -= i;
}
}
break;
default:
return lzx->error = MSPACK_ERR_DECRUNCH; /* might as well */
}
/* did the final match overrun our desired this_run length? */
if (this_run < 0) {
if ((unsigned int)(-this_run) > lzx->block_remaining) {
D(("overrun went past end of block by %d (%d remaining)",
-this_run, lzx->block_remaining ))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
lzx->block_remaining -= -this_run;
}
} /* while (bytes_todo > 0) */
/* streams don't extend over frame boundaries */
if ((window_posn - lzx->frame_posn) != frame_size) {
D(("decode beyond output frame limits! %d != %d",
window_posn - lzx->frame_posn, frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-align input bitstream */
if (bits_left > 0) ENSURE_BITS(16);
if (bits_left & 15) REMOVE_BITS(bits_left & 15);
/* check that we've used all of the previous frame first */
if (lzx->o_ptr != lzx->o_end) {
D(("%ld avail bytes, new %d frame",
(long)(lzx->o_end - lzx->o_ptr), frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* does this intel block _really_ need decoding? */
if (lzx->intel_started && lzx->intel_filesize &&
(lzx->frame <= 32768) && (frame_size > 10))
{
unsigned char *data = &lzx->e8_buf[0];
unsigned char *dataend = &lzx->e8_buf[frame_size - 10];
signed int curpos = lzx->intel_curpos;
signed int filesize = lzx->intel_filesize;
signed int abs_off, rel_off;
/* copy e8 block to the e8 buffer and tweak if needed */
lzx->o_ptr = data;
lzx->sys->copy(&lzx->window[lzx->frame_posn], data, frame_size);
while (data < dataend) {
if (*data++ != 0xE8) { curpos++; continue; }
abs_off = data[0] | (data[1]<<8) | (data[2]<<16) | (data[3]<<24);
if ((abs_off >= -curpos) && (abs_off < filesize)) {
rel_off = (abs_off >= 0) ? abs_off - curpos : abs_off + filesize;
data[0] = (unsigned char) rel_off;
data[1] = (unsigned char) (rel_off >> 8);
data[2] = (unsigned char) (rel_off >> 16);
data[3] = (unsigned char) (rel_off >> 24);
}
data += 4;
curpos += 5;
}
lzx->intel_curpos += frame_size;
}
else {
lzx->o_ptr = &lzx->window[lzx->frame_posn];
if (lzx->intel_filesize) lzx->intel_curpos += frame_size;
}
lzx->o_end = &lzx->o_ptr[frame_size];
/* write a frame */
i = (out_bytes < (off_t)frame_size) ? (unsigned int)out_bytes : frame_size;
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
/* advance frame start position */
lzx->frame_posn += frame_size;
lzx->frame++;
/* wrap window / frame position pointers */
if (window_posn == lzx->window_size) window_posn = 0;
if (lzx->frame_posn == lzx->window_size) lzx->frame_posn = 0;
} /* while (lzx->frame < end_frame) */
if (out_bytes) {
D(("bytes left to output"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* store local state */
STORE_BITS;
lzx->window_posn = window_posn;
lzx->R0 = R0;
lzx->R1 = R1;
lzx->R2 = R2;
return MSPACK_ERR_OK;
}
Commit Message: Prevent a 1-byte underread of the input buffer if an odd-sized data block comes just before an uncompressed block header
CWE ID: CWE-189 | int lzxd_decompress(struct lzxd_stream *lzx, off_t out_bytes) {
/* bitstream and huffman reading variables */
register unsigned int bit_buffer;
register int bits_left, i=0;
unsigned char *i_ptr, *i_end;
register unsigned short sym;
int match_length, length_footer, extra, verbatim_bits, bytes_todo;
int this_run, main_element, aligned_bits, j;
unsigned char *window, *runsrc, *rundest, buf[12];
unsigned int frame_size=0, end_frame, match_offset, window_posn;
unsigned int R0, R1, R2;
/* easy answers */
if (!lzx || (out_bytes < 0)) return MSPACK_ERR_ARGS;
if (lzx->error) return lzx->error;
/* flush out any stored-up bytes before we begin */
i = lzx->o_end - lzx->o_ptr;
if ((off_t) i > out_bytes) i = (int) out_bytes;
if (i) {
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
}
if (out_bytes == 0) return MSPACK_ERR_OK;
/* restore local state */
RESTORE_BITS;
window = lzx->window;
window_posn = lzx->window_posn;
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
end_frame = (unsigned int)((lzx->offset + out_bytes) / LZX_FRAME_SIZE) + 1;
while (lzx->frame < end_frame) {
/* have we reached the reset interval? (if there is one?) */
if (lzx->reset_interval && ((lzx->frame % lzx->reset_interval) == 0)) {
if (lzx->block_remaining) {
D(("%d bytes remaining at reset interval", lzx->block_remaining))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-read the intel header and reset the huffman lengths */
lzxd_reset_state(lzx);
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
}
/* LZX DELTA format has chunk_size, not present in LZX format */
if (lzx->is_delta) {
ENSURE_BITS(16);
REMOVE_BITS(16);
}
/* read header if necessary */
if (!lzx->header_read) {
/* read 1 bit. if bit=0, intel filesize = 0.
* if bit=1, read intel filesize (32 bits) */
j = 0; READ_BITS(i, 1); if (i) { READ_BITS(i, 16); READ_BITS(j, 16); }
lzx->intel_filesize = (i << 16) | j;
lzx->header_read = 1;
}
/* calculate size of frame: all frames are 32k except the final frame
* which is 32kb or less. this can only be calculated when lzx->length
* has been filled in. */
frame_size = LZX_FRAME_SIZE;
if (lzx->length && (lzx->length - lzx->offset) < (off_t)frame_size) {
frame_size = lzx->length - lzx->offset;
}
/* decode until one more frame is available */
bytes_todo = lzx->frame_posn + frame_size - window_posn;
while (bytes_todo > 0) {
/* initialise new block, if one is needed */
if (lzx->block_remaining == 0) {
/* realign if previous block was an odd-sized UNCOMPRESSED block */
if ((lzx->block_type == LZX_BLOCKTYPE_UNCOMPRESSED) &&
(lzx->block_length & 1))
{
READ_IF_NEEDED;
i_ptr++;
}
/* read block type (3 bits) and block length (24 bits) */
READ_BITS(lzx->block_type, 3);
READ_BITS(i, 16); READ_BITS(j, 8);
lzx->block_remaining = lzx->block_length = (i << 8) | j;
/*D(("new block t%d len %u", lzx->block_type, lzx->block_length))*/
/* read individual block headers */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_ALIGNED:
/* read lengths of and build aligned huffman decoding tree */
for (i = 0; i < 8; i++) { READ_BITS(j, 3); lzx->ALIGNED_len[i] = j; }
BUILD_TABLE(ALIGNED);
/* no break -- rest of aligned header is same as verbatim */
case LZX_BLOCKTYPE_VERBATIM:
/* read lengths of and build main huffman decoding tree */
READ_LENGTHS(MAINTREE, 0, 256);
READ_LENGTHS(MAINTREE, 256, LZX_NUM_CHARS + lzx->num_offsets);
BUILD_TABLE(MAINTREE);
/* if the literal 0xE8 is anywhere in the block... */
if (lzx->MAINTREE_len[0xE8] != 0) lzx->intel_started = 1;
/* read lengths of and build lengths huffman decoding tree */
READ_LENGTHS(LENGTH, 0, LZX_NUM_SECONDARY_LENGTHS);
BUILD_TABLE_MAYBE_EMPTY(LENGTH);
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* because we can't assume otherwise */
lzx->intel_started = 1;
/* read 1-16 (not 0-15) bits to align to bytes */
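        /* refill only when the bit buffer is empty: unlike the old
         * unconditional ENSURE_BITS(16) plus i_ptr rewind, this never
         * steps i_ptr backwards, so it cannot underread the buffer */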
if (bits_left == 0) ENSURE_BITS(16);
bits_left = 0; bit_buffer = 0;
/* read 12 bytes of stored R0 / R1 / R2 values */
for (rundest = &buf[0], i = 0; i < 12; i++) {
READ_IF_NEEDED;
*rundest++ = *i_ptr++;
}
R0 = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
R1 = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24);
R2 = buf[8] | (buf[9] << 8) | (buf[10] << 16) | (buf[11] << 24);
break;
default:
D(("bad block type"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
}
/* decode more of the block:
* run = min(what's available, what's needed) */
this_run = lzx->block_remaining;
if (this_run > bytes_todo) this_run = bytes_todo;
/* assume we decode exactly this_run bytes, for now */
bytes_todo -= this_run;
lzx->block_remaining -= this_run;
/* decode at least this_run bytes */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_VERBATIM:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1=R0; R0 = match_offset; break;
case 2: match_offset = R2; R2=R0; R0 = match_offset; break;
case 3: match_offset = 1; R2=R1; R1=R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
READ_BITS(verbatim_bits, extra);
match_offset = position_base[match_offset] - 2 + verbatim_bits;
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_ALIGNED:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1 = R0; R0 = match_offset; break;
case 2: match_offset = R2; R2 = R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
match_offset = position_base[match_offset] - 2;
if (extra > 3) {
/* verbatim and aligned bits */
extra -= 3;
READ_BITS(verbatim_bits, extra);
match_offset += (verbatim_bits << 3);
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra == 3) {
/* aligned bits only */
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra > 0) { /* extra==1, extra==2 */
/* verbatim bits only */
READ_BITS(verbatim_bits, extra);
match_offset += verbatim_bits;
}
else /* extra == 0 */ {
/* ??? not defined in LZX specification! */
match_offset = 1;
}
/* update repeated offset LRU queue */
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* as this_run is limited not to wrap a frame, this also means it
* won't wrap the window (as the window is a multiple of 32k) */
rundest = &window[window_posn];
window_posn += this_run;
while (this_run > 0) {
if ((i = i_end - i_ptr) == 0) {
READ_IF_NEEDED;
}
else {
if (i > this_run) i = this_run;
lzx->sys->copy(i_ptr, rundest, (size_t) i);
rundest += i;
i_ptr += i;
this_run -= i;
}
}
break;
default:
return lzx->error = MSPACK_ERR_DECRUNCH; /* might as well */
}
/* did the final match overrun our desired this_run length? */
if (this_run < 0) {
if ((unsigned int)(-this_run) > lzx->block_remaining) {
D(("overrun went past end of block by %d (%d remaining)",
-this_run, lzx->block_remaining ))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
lzx->block_remaining -= -this_run;
}
} /* while (bytes_todo > 0) */
/* streams don't extend over frame boundaries */
if ((window_posn - lzx->frame_posn) != frame_size) {
D(("decode beyond output frame limits! %d != %d",
window_posn - lzx->frame_posn, frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-align input bitstream */
if (bits_left > 0) ENSURE_BITS(16);
if (bits_left & 15) REMOVE_BITS(bits_left & 15);
/* check that we've used all of the previous frame first */
if (lzx->o_ptr != lzx->o_end) {
D(("%ld avail bytes, new %d frame",
(long)(lzx->o_end - lzx->o_ptr), frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* does this intel block _really_ need decoding? */
if (lzx->intel_started && lzx->intel_filesize &&
(lzx->frame <= 32768) && (frame_size > 10))
{
unsigned char *data = &lzx->e8_buf[0];
unsigned char *dataend = &lzx->e8_buf[frame_size - 10];
signed int curpos = lzx->intel_curpos;
signed int filesize = lzx->intel_filesize;
signed int abs_off, rel_off;
/* copy e8 block to the e8 buffer and tweak if needed */
lzx->o_ptr = data;
lzx->sys->copy(&lzx->window[lzx->frame_posn], data, frame_size);
while (data < dataend) {
if (*data++ != 0xE8) { curpos++; continue; }
abs_off = data[0] | (data[1]<<8) | (data[2]<<16) | (data[3]<<24);
if ((abs_off >= -curpos) && (abs_off < filesize)) {
rel_off = (abs_off >= 0) ? abs_off - curpos : abs_off + filesize;
data[0] = (unsigned char) rel_off;
data[1] = (unsigned char) (rel_off >> 8);
data[2] = (unsigned char) (rel_off >> 16);
data[3] = (unsigned char) (rel_off >> 24);
}
data += 4;
curpos += 5;
}
lzx->intel_curpos += frame_size;
}
else {
lzx->o_ptr = &lzx->window[lzx->frame_posn];
if (lzx->intel_filesize) lzx->intel_curpos += frame_size;
}
lzx->o_end = &lzx->o_ptr[frame_size];
/* write a frame */
i = (out_bytes < (off_t)frame_size) ? (unsigned int)out_bytes : frame_size;
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
/* advance frame start position */
lzx->frame_posn += frame_size;
lzx->frame++;
/* wrap window / frame position pointers */
if (window_posn == lzx->window_size) window_posn = 0;
if (lzx->frame_posn == lzx->window_size) lzx->frame_posn = 0;
} /* while (lzx->frame < end_frame) */
if (out_bytes) {
D(("bytes left to output"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* store local state */
STORE_BITS;
lzx->window_posn = window_posn;
lzx->R0 = R0;
lzx->R1 = R1;
lzx->R2 = R2;
return MSPACK_ERR_OK;
}
| 166,612 |
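
A schematic model of the realignment step at the heart of the fix above (hypothetical reader struct and a simplified single-shot refill; the real ENSURE_BITS gains 16 bits per 2 input bytes, just as modeled):

typedef struct {
    const unsigned char *start, *ptr;   /* input buffer and read cursor */
    int bits_left;                      /* bits held in the bit buffer */
} bitrdr;

/* simplified ENSURE_BITS(16): consume 2 input bytes, gain 16 bits */
static void ensure16(bitrdr *r)
{
    if (r->bits_left < 16) { r->ptr += 2; r->bits_left += 16; }
}

/* pre-fix shape: refill unconditionally, then hand excess bytes back;
 * combined with the one-byte skip after an odd-sized block, this
 * arithmetic could leave r->ptr one byte before r->start */
static void realign_old(bitrdr *r)
{
    ensure16(r);
    if (r->bits_left > 16) r->ptr -= 2;
    r->bits_left = 0;
}

/* post-fix shape: refill only when the buffer is empty and never
 * rewind, so r->ptr cannot move backwards past consumed input */
static void realign_new(bitrdr *r)
{
    if (r->bits_left == 0) ensure16(r);
    r->bits_left = 0;
}
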
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void rpng2_x_redisplay_image(ulg startcol, ulg startrow,
ulg width, ulg height)
{
uch bg_red = rpng2_info.bg_red;
uch bg_green = rpng2_info.bg_green;
uch bg_blue = rpng2_info.bg_blue;
uch *src, *src2=NULL;
char *dest;
uch r, g, b, a;
ulg i, row, lastrow = 0;
ulg pixel;
int ximage_rowbytes = ximage->bytes_per_line;
Trace((stderr, "beginning display loop (image_channels == %d)\n",
rpng2_info.channels))
Trace((stderr, " (width = %ld, rowbytes = %d, ximage_rowbytes = %d)\n",
rpng2_info.width, rpng2_info.rowbytes, ximage_rowbytes))
Trace((stderr, " (bpp = %d)\n", ximage->bits_per_pixel))
Trace((stderr, " (byte_order = %s)\n", ximage->byte_order == MSBFirst?
"MSBFirst" : (ximage->byte_order == LSBFirst? "LSBFirst" : "unknown")))
/*---------------------------------------------------------------------------
Aside from the use of the rpng2_info struct and of src2 (for background
image), this routine is identical to rpng_x_display_image() in the non-
progressive version of the program--for the simple reason that redisplay
of the image against a new background happens after the image is fully
decoded and therefore is, by definition, non-progressive.
---------------------------------------------------------------------------*/
if (depth == 24 || depth == 32) {
ulg red, green, blue;
int bpp = ximage->bits_per_pixel;
for (lastrow = row = startrow; row < startrow+height; ++row) {
src = rpng2_info.image_data + row*rpng2_info.rowbytes;
if (bg_image)
src2 = bg_data + row*bg_rowbytes;
dest = ximage->data + row*ximage_rowbytes;
if (rpng2_info.channels == 3) {
for (i = rpng2_info.width; i > 0; --i) {
red = *src++;
green = *src++;
blue = *src++;
#ifdef NO_24BIT_MASKS
pixel = (red << RShift) |
(green << GShift) |
(blue << BShift);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift, RMask, etc.) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#else
red = (RShift < 0)? red << (-RShift) : red >> RShift;
green = (GShift < 0)? green << (-GShift) : green >> GShift;
blue = (BShift < 0)? blue << (-BShift) : blue >> BShift;
pixel = (red & RMask) | (green & GMask) | (blue & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* GRR BUG */
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift/RMask/etc. here, too) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#endif
}
} else /* if (rpng2_info.channels == 4) */ {
for (i = rpng2_info.width; i > 0; --i) {
r = *src++;
g = *src++;
b = *src++;
a = *src++;
if (bg_image) {
bg_red = *src2++;
bg_green = *src2++;
bg_blue = *src2++;
}
if (a == 255) {
red = r;
green = g;
blue = b;
} else if (a == 0) {
red = bg_red;
green = bg_green;
blue = bg_blue;
} else {
/* this macro (from png.h) composites the foreground
* and background values and puts the result into the
* first argument */
alpha_composite(red, r, a, bg_red);
alpha_composite(green, g, a, bg_green);
alpha_composite(blue, b, a, bg_blue);
}
#ifdef NO_24BIT_MASKS
pixel = (red << RShift) |
(green << GShift) |
(blue << BShift);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift, RMask, etc.) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#else
red = (RShift < 0)? red << (-RShift) : red >> RShift;
green = (GShift < 0)? green << (-GShift) : green >> GShift;
blue = (BShift < 0)? blue << (-BShift) : blue >> BShift;
pixel = (red & RMask) | (green & GMask) | (blue & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* GRR BUG */
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift/RMask/etc. here, too) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#endif
}
}
/* display after every 16 lines */
if (((row+1) & 0xf) == 0) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, 16);
XFlush(display);
lastrow = row + 1;
}
}
} else if (depth == 16) {
ush red, green, blue;
for (lastrow = row = startrow; row < startrow+height; ++row) {
src = rpng2_info.row_pointers[row];
if (bg_image)
src2 = bg_data + row*bg_rowbytes;
dest = ximage->data + row*ximage_rowbytes;
if (rpng2_info.channels == 3) {
for (i = rpng2_info.width; i > 0; --i) {
red = ((ush)(*src) << 8);
++src;
green = ((ush)(*src) << 8);
++src;
blue = ((ush)(*src) << 8);
++src;
pixel = ((red >> RShift) & RMask) |
((green >> GShift) & GMask) |
((blue >> BShift) & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
} else /* if (rpng2_info.channels == 4) */ {
for (i = rpng2_info.width; i > 0; --i) {
r = *src++;
g = *src++;
b = *src++;
a = *src++;
if (bg_image) {
bg_red = *src2++;
bg_green = *src2++;
bg_blue = *src2++;
}
if (a == 255) {
red = ((ush)r << 8);
green = ((ush)g << 8);
blue = ((ush)b << 8);
} else if (a == 0) {
red = ((ush)bg_red << 8);
green = ((ush)bg_green << 8);
blue = ((ush)bg_blue << 8);
} else {
/* this macro (from png.h) composites the foreground
* and background values and puts the result back into
* the first argument (== fg byte here: safe) */
alpha_composite(r, r, a, bg_red);
alpha_composite(g, g, a, bg_green);
alpha_composite(b, b, a, bg_blue);
red = ((ush)r << 8);
green = ((ush)g << 8);
blue = ((ush)b << 8);
}
pixel = ((red >> RShift) & RMask) |
((green >> GShift) & GMask) |
((blue >> BShift) & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
}
/* display after every 16 lines */
if (((row+1) & 0xf) == 0) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, 16);
XFlush(display);
lastrow = row + 1;
}
}
} else /* depth == 8 */ {
/* GRR: add 8-bit support */
}
Trace((stderr, "calling final XPutImage()\n"))
if (lastrow < startrow+height) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, rpng2_info.height-lastrow);
XFlush(display);
}
} /* end function rpng2_x_redisplay_image() */
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | static void rpng2_x_redisplay_image(ulg startcol, ulg startrow,
ulg width, ulg height)
{
uch bg_red = rpng2_info.bg_red;
uch bg_green = rpng2_info.bg_green;
uch bg_blue = rpng2_info.bg_blue;
uch *src, *src2=NULL;
char *dest;
uch r, g, b, a;
ulg i, row, lastrow = 0;
ulg pixel;
int ximage_rowbytes = ximage->bytes_per_line;
Trace((stderr, "beginning display loop (image_channels == %d)\n",
rpng2_info.channels))
Trace((stderr, " (width = %ld, rowbytes = %d, ximage_rowbytes = %d)\n",
rpng2_info.width, rpng2_info.rowbytes, ximage_rowbytes))
Trace((stderr, " (bpp = %d)\n", ximage->bits_per_pixel))
Trace((stderr, " (byte_order = %s)\n", ximage->byte_order == MSBFirst?
"MSBFirst" : (ximage->byte_order == LSBFirst? "LSBFirst" : "unknown")))
/*---------------------------------------------------------------------------
Aside from the use of the rpng2_info struct and of src2 (for background
image), this routine is identical to rpng_x_display_image() in the non-
progressive version of the program--for the simple reason that redisplay
of the image against a new background happens after the image is fully
decoded and therefore is, by definition, non-progressive.
---------------------------------------------------------------------------*/
if (depth == 24 || depth == 32) {
ulg red, green, blue;
int bpp = ximage->bits_per_pixel;
for (lastrow = row = startrow; row < startrow+height; ++row) {
src = rpng2_info.image_data + row*rpng2_info.rowbytes;
if (bg_image)
src2 = bg_data + row*bg_rowbytes;
dest = ximage->data + row*ximage_rowbytes;
if (rpng2_info.channels == 3) {
for (i = rpng2_info.width; i > 0; --i) {
red = *src++;
green = *src++;
blue = *src++;
#ifdef NO_24BIT_MASKS
pixel = (red << RShift) |
(green << GShift) |
(blue << BShift);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift, RMask, etc.) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#else
red = (RShift < 0)? red << (-RShift) : red >> RShift;
green = (GShift < 0)? green << (-GShift) : green >> GShift;
blue = (BShift < 0)? blue << (-BShift) : blue >> BShift;
pixel = (red & RMask) | (green & GMask) | (blue & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* GRR BUG */
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift/RMask/etc. here, too) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#endif
}
} else /* if (rpng2_info.channels == 4) */ {
for (i = rpng2_info.width; i > 0; --i) {
r = *src++;
g = *src++;
b = *src++;
a = *src++;
if (bg_image) {
bg_red = *src2++;
bg_green = *src2++;
bg_blue = *src2++;
}
if (a == 255) {
red = r;
green = g;
blue = b;
} else if (a == 0) {
red = bg_red;
green = bg_green;
blue = bg_blue;
} else {
/* this macro (from png.h) composites the foreground
* and background values and puts the result into the
* first argument */
alpha_composite(red, r, a, bg_red);
alpha_composite(green, g, a, bg_green);
alpha_composite(blue, b, a, bg_blue);
}
#ifdef NO_24BIT_MASKS
pixel = (red << RShift) |
(green << GShift) |
(blue << BShift);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift, RMask, etc.) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#else
red = (RShift < 0)? red << (-RShift) : red >> RShift;
green = (GShift < 0)? green << (-GShift) : green >> GShift;
blue = (BShift < 0)? blue << (-BShift) : blue >> BShift;
pixel = (red & RMask) | (green & GMask) | (blue & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* GRR BUG */
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift/RMask/etc. here, too) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#endif
}
}
/* display after every 16 lines */
if (((row+1) & 0xf) == 0) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, 16);
XFlush(display);
lastrow = row + 1;
}
}
} else if (depth == 16) {
ush red, green, blue;
for (lastrow = row = startrow; row < startrow+height; ++row) {
src = rpng2_info.row_pointers[row];
if (bg_image)
src2 = bg_data + row*bg_rowbytes;
dest = ximage->data + row*ximage_rowbytes;
if (rpng2_info.channels == 3) {
for (i = rpng2_info.width; i > 0; --i) {
red = ((ush)(*src) << 8);
++src;
green = ((ush)(*src) << 8);
++src;
blue = ((ush)(*src) << 8);
++src;
pixel = ((red >> RShift) & RMask) |
((green >> GShift) & GMask) |
((blue >> BShift) & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
} else /* if (rpng2_info.channels == 4) */ {
for (i = rpng2_info.width; i > 0; --i) {
r = *src++;
g = *src++;
b = *src++;
a = *src++;
if (bg_image) {
bg_red = *src2++;
bg_green = *src2++;
bg_blue = *src2++;
}
if (a == 255) {
red = ((ush)r << 8);
green = ((ush)g << 8);
blue = ((ush)b << 8);
} else if (a == 0) {
red = ((ush)bg_red << 8);
green = ((ush)bg_green << 8);
blue = ((ush)bg_blue << 8);
} else {
/* this macro (from png.h) composites the foreground
* and background values and puts the result back into
* the first argument (== fg byte here: safe) */
alpha_composite(r, r, a, bg_red);
alpha_composite(g, g, a, bg_green);
alpha_composite(b, b, a, bg_blue);
red = ((ush)r << 8);
green = ((ush)g << 8);
blue = ((ush)b << 8);
}
pixel = ((red >> RShift) & RMask) |
((green >> GShift) & GMask) |
((blue >> BShift) & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
}
/* display after every 16 lines */
if (((row+1) & 0xf) == 0) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, 16);
XFlush(display);
lastrow = row + 1;
}
}
} else /* depth == 8 */ {
/* GRR: add 8-bit support */
}
Trace((stderr, "calling final XPutImage()\n"))
if (lastrow < startrow+height) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, rpng2_info.height-lastrow);
XFlush(display);
}
(void)startcol;
(void)width;
} /* end function rpng2_x_redisplay_image() */
| 173,575 |
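
For reference, the alpha_composite macro the rpng2 comments cite blends the foreground over the background without a divide; a sketch of its typical definition in the PNG demo headers (treat the exact form as an assumption, since the macro itself lives outside this snippet):

typedef unsigned char uch;    /* the demos' own typedefs */
typedef unsigned short ush;

/* composite = round((fg*alpha + bg*(255-alpha)) / 255); the final
 * (temp + (temp >> 8)) >> 8 is an exact divide-by-255 for this range */
#define alpha_composite(composite, fg, alpha, bg) {                   \
    ush temp = (ush)((ush)(fg)*(ush)(alpha) +                          \
                (ush)(bg)*(ush)(255 - (ush)(alpha)) + (ush)128);       \
    (composite) = (uch)((temp + (temp >> 8)) >> 8);                    \
}
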
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_qop_t qop_req, gss_iov_buffer_desc *iov, int iov_count)
{
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL,
iov, iov_count);
if (status != GSS_S_COMPLETE)
return status;
/* Select the appropriate underlying mechanism routine and call it. */
ctx = (gss_union_ctx_id_t)context_handle;
mech = gssint_get_mechanism(ctx->mech_type);
if (mech == NULL)
return GSS_S_BAD_MECH;
if (mech->gss_get_mic_iov == NULL)
return GSS_S_UNAVAILABLE;
status = mech->gss_get_mic_iov(minor_status, ctx->internal_ctx_id, qop_req,
iov, iov_count);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
return status;
}
Commit Message: Preserve GSS context on init/accept failure
After gss_init_sec_context() or gss_accept_sec_context() has created a
context, don't delete the mechglue context on failures from subsequent
calls, even if the mechanism deletes the mech-specific context (which
is allowed by RFC 2744 but not preferred). Check for union contexts
with no mechanism context in each GSS function which accepts a
gss_ctx_id_t.
CVE-2017-11462:
RFC 2744 permits a GSS-API implementation to delete an existing
security context on a second or subsequent call to
gss_init_sec_context() or gss_accept_sec_context() if the call results
in an error. This API behavior has been found to be dangerous,
leading to the possibility of memory errors in some callers. For
safety, GSS-API implementations should instead preserve existing
security contexts on error until the caller deletes them.
All versions of MIT krb5 prior to this change may delete acceptor
contexts on error. Versions 1.13.4 through 1.13.7, 1.14.1 through
1.14.5, and 1.15 through 1.15.1 may also delete initiator contexts on
error.
ticket: 8598 (new)
target_version: 1.15-next
target_version: 1.14-next
tags: pullup
CWE ID: CWE-415 | gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_qop_t qop_req, gss_iov_buffer_desc *iov, int iov_count)
{
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL,
iov, iov_count);
if (status != GSS_S_COMPLETE)
return status;
/* Select the appropriate underlying mechanism routine and call it. */
ctx = (gss_union_ctx_id_t)context_handle;
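    /* A mechanism may delete its context on error while the union
     * context survives (see the commit message above), so fail cleanly
     * here rather than passing a missing mech context downstream. */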
if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT)
return GSS_S_NO_CONTEXT;
mech = gssint_get_mechanism(ctx->mech_type);
if (mech == NULL)
return GSS_S_BAD_MECH;
if (mech->gss_get_mic_iov == NULL)
return GSS_S_UNAVAILABLE;
status = mech->gss_get_mic_iov(minor_status, ctx->internal_ctx_id, qop_req,
iov, iov_count);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
return status;
}
| 168,029 |
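
The caller-side danger described in the commit message is easiest to see in the standard initiator loop; a hedged C sketch (real GSS-API calls; cred, target and req_flags are assumed to come from earlier gss_acquire_cred()/gss_import_name() setup, and token transport is elided):

gss_ctx_id_t ctx = GSS_C_NO_CONTEXT;
gss_buffer_desc in_tok = GSS_C_EMPTY_BUFFER, out_tok = GSS_C_EMPTY_BUFFER;
OM_uint32 maj, min;

do {
    maj = gss_init_sec_context(&min, cred, &ctx, target,
                               GSS_C_NO_OID, req_flags, 0,
                               GSS_C_NO_CHANNEL_BINDINGS, &in_tok,
                               NULL, &out_tok, NULL, NULL);
    /* ... send out_tok to the acceptor, receive the next in_tok ... */
} while (maj == GSS_S_CONTINUE_NEEDED);

/* Callers routinely clean up unconditionally; when a failing second
 * call had already deleted ctx (as RFC 2744 permitted), this became a
 * double free. The fix keeps the union context alive and adds the
 * GSS_C_NO_CONTEXT guard seen in each entry point instead. */
if (ctx != GSS_C_NO_CONTEXT)
    (void)gss_delete_sec_context(&min, &ctx, GSS_C_NO_BUFFER);
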
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void OPENSSL_fork_child(void)
{
rand_fork();
}
Commit Message:
CWE ID: CWE-330 | void OPENSSL_fork_child(void)
{
}
| 165,142 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void inet6_destroy_sock(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
struct ipv6_txoptions *opt;
/* Release rx options */
skb = xchg(&np->pktoptions, NULL);
if (skb)
kfree_skb(skb);
skb = xchg(&np->rxpmtu, NULL);
if (skb)
kfree_skb(skb);
/* Free flowlabels */
fl6_free_socklist(sk);
/* Free tx options */
opt = xchg(&np->opt, NULL);
if (opt)
sock_kfree_s(sk, opt, opt->tot_len);
}
Commit Message: ipv6: add complete rcu protection around np->opt
This patch addresses multiple problems:
UDP/RAW sendmsg() needs to get a stable struct ipv6_txoptions
while the socket is not locked: other threads can change np->opt
concurrently. Dmitry posted a syzkaller
(http://github.com/google/syzkaller) program demonstrating the
use-after-free.
Starting with TCP/DCCP lockless listeners, tcp_v6_syn_recv_sock()
and dccp_v6_request_recv_sock() also need to use RCU protection
to dereference np->opt once (before calling ipv6_dup_options())
This patch adds full RCU protection to np->opt
Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-416 | void inet6_destroy_sock(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
struct ipv6_txoptions *opt;
/* Release rx options */
skb = xchg(&np->pktoptions, NULL);
if (skb)
kfree_skb(skb);
skb = xchg(&np->rxpmtu, NULL);
if (skb)
kfree_skb(skb);
/* Free flowlabels */
fl6_free_socklist(sk);
/* Free tx options */
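    /* np->opt is RCU-protected now: detach it with xchg() (__force
     * strips the __rcu annotation) and drop this socket's reference
     * via txopt_put() instead of freeing the memory in place */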
opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
if (opt) {
atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
txopt_put(opt);
}
}
| 167,327 |
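
On the sendmsg() side the commit message implies a reader pattern like the following (hedged sketch: txopt_put() is visible in the fix above; the rcu_dereference()-plus-refcount step is assumed from the description, with the refcnt field presumed added by the same patch):

struct ipv6_txoptions *opt;

rcu_read_lock();
opt = rcu_dereference(np->opt);             /* np->opt is __rcu now */
if (opt && !atomic_inc_not_zero(&opt->refcnt))
    opt = NULL;                             /* writer is tearing it down */
rcu_read_unlock();

/* ... build and send the packet using the now-stable opt ... */

txopt_put(opt);                             /* pairs with the ref above */
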
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: status_t MediaHTTP::connect(
const char *uri,
const KeyedVector<String8, String8> *headers,
off64_t /* offset */) {
if (mInitCheck != OK) {
return mInitCheck;
}
KeyedVector<String8, String8> extHeaders;
if (headers != NULL) {
extHeaders = *headers;
}
if (extHeaders.indexOfKey(String8("User-Agent")) < 0) {
extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
}
bool success = mHTTPConnection->connect(uri, &extHeaders);
mLastHeaders = extHeaders;
mLastURI = uri;
mCachedSizeValid = false;
if (success) {
AString sanitized = uriDebugString(uri);
mName = String8::format("MediaHTTP(%s)", sanitized.c_str());
}
return success ? OK : UNKNOWN_ERROR;
}
Commit Message: Fix free-after-use for MediaHTTP
fix free-after-use when we reconnect to an HTTP media source.
Change-Id: I96da5a79f5382409a545f8b4e22a24523f287464
Tests: compilation and eyeballs
Bug: 31373622
(cherry picked from commit dd81e1592ffa77812998b05761eb840b70fed121)
CWE ID: CWE-119 | status_t MediaHTTP::connect(
const char *uri,
const KeyedVector<String8, String8> *headers,
off64_t /* offset */) {
if (mInitCheck != OK) {
return mInitCheck;
}
KeyedVector<String8, String8> extHeaders;
if (headers != NULL) {
extHeaders = *headers;
}
if (extHeaders.indexOfKey(String8("User-Agent")) < 0) {
extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
}
mLastURI = uri;
// reconnect() calls with uri == old mLastURI.c_str(), which gets zapped
// as part of the above assignment. Ensure no accidental later use.
uri = NULL;
bool success = mHTTPConnection->connect(mLastURI.c_str(), &extHeaders);
mLastHeaders = extHeaders;
mCachedSizeValid = false;
if (success) {
AString sanitized = uriDebugString(mLastURI);
mName = String8::format("MediaHTTP(%s)", sanitized.c_str());
}
return success ? OK : UNKNOWN_ERROR;
}
| 173,386 |
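The MediaHTTP bug above is self-aliasing: a reconnect re-enters connect() with uri pointing into mLastURI's own buffer, and the assignment mLastURI = uri frees that buffer out from under the caller's pointer. The patch copies first and then defensively nulls uri. The same hazard in plain C, with all names hypothetical:

```c
#include <stdlib.h>
#include <string.h>

void do_connect(const char *uri);    /* hypothetical transport call */

struct conn { char *last_uri; };

/* BROKEN: callers may pass uri == c->last_uri (a "reconnect to the
 * last URI" path); the free() then invalidates uri itself. */
void connect_broken(struct conn *c, const char *uri)
{
    free(c->last_uri);
    c->last_uri = strdup(uri);   /* reads freed memory if uri aliased it */
    do_connect(uri);             /* dangling pointer on the reconnect path */
}

/* FIXED: take the copy first, then refer only to the owned copy. */
void connect_fixed(struct conn *c, const char *uri)
{
    char *copy = strdup(uri);
    free(c->last_uri);
    c->last_uri = copy;
    uri = NULL;                  /* mirror the patch: forbid accidental reuse */
    do_connect(c->last_uri);
}
```

Zapping uri after the copy turns any later slip into an immediate, debuggable crash instead of a silent read of freed memory.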
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static Image *ReadCMYKImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
Image
*canvas_image,
*image;
MagickBooleanType
status;
MagickOffsetType
scene;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register ssize_t
i;
size_t
length;
ssize_t
count,
y;
unsigned char
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(OptionError,"MustSpecifyImageSize");
SetImageColorspace(image,CMYKColorspace);
if (image_info->interlace != PartitionInterlace)
{
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
/*
Create virtual canvas to support cropping (i.e. image.cmyk[100x100+10+20]).
*/
canvas_image=CloneImage(image,image->extract_info.width,1,MagickFalse,
exception);
(void) SetImageVirtualPixelMethod(canvas_image,BlackVirtualPixelMethod);
quantum_info=AcquireQuantumInfo(image_info,canvas_image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=GetQuantumPixels(quantum_info);
quantum_type=CMYKQuantum;
if (LocaleCompare(image_info->magick,"CMYKA") == 0)
{
quantum_type=CMYKAQuantum;
image->matte=MagickTrue;
}
if (image_info->number_scenes != 0)
while (image->scene < image_info->scene)
{
/*
Skip to next image.
*/
image->scene++;
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
count=ReadBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
}
}
count=0;
length=0;
scene=0;
do
{
/*
Read pixels to virtual canvas image then push to image.
*/
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
SetImageColorspace(image,CMYKColorspace);
switch (image_info->interlace)
{
case NoInterlace:
default:
{
/*
No interlacing: CMYKCMYKCMYKCMYKCMYKCMYK...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
count=ReadBlob(image,length,pixels);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const IndexPacket
*restrict canvas_indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=QueueAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
canvas_indexes=GetVirtualIndexQueue(canvas_image);
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelBlack(indexes+x,GetPixelBlack(
canvas_indexes+image->extract_info.x+x));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
break;
}
case LineInterlace:
{
static QuantumType
quantum_types[5] =
{
CyanQuantum,
MagentaQuantum,
YellowQuantum,
BlackQuantum,
OpacityQuantum
};
/*
Line interlacing: CCC...MMM...YYY...KKK...CCC...MMM...YYY...KKK...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,CyanQuantum);
count=ReadBlob(image,length,pixels);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const IndexPacket
*restrict canvas_indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
for (i=0; i < (image->matte != MagickFalse ? 5 : 4); i++)
{
quantum_type=quantum_types[i];
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,
0,canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
canvas_indexes=GetVirtualIndexQueue(canvas_image);
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (quantum_type)
{
case CyanQuantum:
{
SetPixelCyan(q,GetPixelCyan(p));
break;
}
case MagentaQuantum:
{
SetPixelMagenta(q,GetPixelMagenta(p));
break;
}
case YellowQuantum:
{
SetPixelYellow(q,GetPixelYellow(p));
break;
}
case BlackQuantum:
{
SetPixelIndex(indexes+x,GetPixelIndex(
canvas_indexes+image->extract_info.x+x));
break;
}
case OpacityQuantum:
{
SetPixelOpacity(q,GetPixelOpacity(p));
break;
}
default:
break;
}
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case PlaneInterlace:
{
/*
Plane interlacing: CCCCCC...MMMMMM...YYYYYY...KKKKKK...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,CyanQuantum);
count=ReadBlob(image,length,pixels);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,CyanQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,6);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,MagentaQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(q,GetPixelGreen(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,6);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,YellowQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(q,GetPixelBlue(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,6);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const IndexPacket
*restrict canvas_indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlackQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
canvas_indexes=GetVirtualIndexQueue(canvas_image);
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(indexes+x,GetPixelIndex(
canvas_indexes+image->extract_info.x+x));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,6);
if (status == MagickFalse)
break;
}
if (image->matte != MagickFalse)
{
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,AlphaQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,
canvas_image->extract_info.x,0,canvas_image->columns,1,
exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelOpacity(q,GetPixelOpacity(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,6);
if (status == MagickFalse)
break;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,6,6);
if (status == MagickFalse)
break;
}
break;
}
case PartitionInterlace:
{
/*
Partition interlacing: CCCCCC..., MMMMMM..., YYYYYY..., KKKKKK...
*/
AppendImageFormat("C",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
length=GetQuantumExtent(canvas_image,quantum_info,CyanQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
if (ReadBlob(image,length,pixels) != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
count=ReadBlob(image,length,pixels);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,CyanQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("M",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,MagentaQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
if (ReadBlob(image,length,pixels) != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
count=ReadBlob(image,length,pixels);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,MagentaQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(q,GetPixelGreen(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("Y",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,YellowQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
if (ReadBlob(image,length,pixels) != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
count=ReadBlob(image,length,pixels);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,YellowQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(q,GetPixelBlue(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("K",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,BlackQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
if (ReadBlob(image,length,pixels) != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
count=ReadBlob(image,length,pixels);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const IndexPacket
*restrict canvas_indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlackQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
canvas_indexes=GetVirtualIndexQueue(canvas_image);
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(indexes+x,GetPixelIndex(
canvas_indexes+image->extract_info.x+x));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,5);
if (status == MagickFalse)
break;
}
if (image->matte != MagickFalse)
{
(void) CloseBlob(image);
AppendImageFormat("A",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,AlphaQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
if (ReadBlob(image,length,pixels) != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
count=ReadBlob(image,length,pixels);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,YellowQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,
0,canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelOpacity(q,GetPixelOpacity(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,5);
if (status == MagickFalse)
break;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,5);
if (status == MagickFalse)
break;
}
break;
}
}
SetQuantumImageType(image,quantum_type);
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (count == (ssize_t) length)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
scene++;
} while (count == (ssize_t) length);
quantum_info=DestroyQuantumInfo(quantum_info);
InheritException(&image->exception,&canvas_image->exception);
canvas_image=DestroyImage(canvas_image);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
Commit Message:
CWE ID: CWE-119 | static Image *ReadCMYKImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
Image
*canvas_image,
*image;
MagickBooleanType
status;
MagickOffsetType
scene;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register ssize_t
i;
size_t
length;
ssize_t
count,
y;
unsigned char
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(OptionError,"MustSpecifyImageSize");
SetImageColorspace(image,CMYKColorspace);
if (image_info->interlace != PartitionInterlace)
{
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
/*
Create virtual canvas to support cropping (i.e. image.cmyk[100x100+10+20]).
*/
canvas_image=CloneImage(image,image->extract_info.width,1,MagickFalse,
exception);
(void) SetImageVirtualPixelMethod(canvas_image,BlackVirtualPixelMethod);
quantum_info=AcquireQuantumInfo(image_info,canvas_image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=GetQuantumPixels(quantum_info);
quantum_type=CMYKQuantum;
if (LocaleCompare(image_info->magick,"CMYKA") == 0)
{
quantum_type=CMYKAQuantum;
image->matte=MagickTrue;
}
if (image_info->number_scenes != 0)
while (image->scene < image_info->scene)
{
/*
Skip to next image.
*/
image->scene++;
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
count=ReadBlob(image,length,pixels);
if (count != (ssize_t) length)
break;
}
}
count=0;
length=0;
scene=0;
do
{
/*
Read pixels to virtual canvas image then push to image.
*/
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
SetImageColorspace(image,CMYKColorspace);
switch (image_info->interlace)
{
case NoInterlace:
default:
{
/*
No interlacing: CMYKCMYKCMYKCMYKCMYKCMYK...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
count=ReadBlob(image,length,pixels);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const IndexPacket
*restrict canvas_indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=QueueAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
canvas_indexes=GetVirtualIndexQueue(canvas_image);
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelBlack(indexes+x,GetPixelBlack(
canvas_indexes+image->extract_info.x+x));
SetPixelOpacity(q,OpaqueOpacity);
if (image->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
break;
}
case LineInterlace:
{
static QuantumType
quantum_types[5] =
{
CyanQuantum,
MagentaQuantum,
YellowQuantum,
BlackQuantum,
OpacityQuantum
};
/*
Line interlacing: CCC...MMM...YYY...KKK...CCC...MMM...YYY...KKK...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,CyanQuantum);
count=ReadBlob(image,length,pixels);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const IndexPacket
*restrict canvas_indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
for (i=0; i < (image->matte != MagickFalse ? 5 : 4); i++)
{
quantum_type=quantum_types[i];
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,
0,canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
canvas_indexes=GetVirtualIndexQueue(canvas_image);
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (quantum_type)
{
case CyanQuantum:
{
SetPixelCyan(q,GetPixelCyan(p));
break;
}
case MagentaQuantum:
{
SetPixelMagenta(q,GetPixelMagenta(p));
break;
}
case YellowQuantum:
{
SetPixelYellow(q,GetPixelYellow(p));
break;
}
case BlackQuantum:
{
SetPixelIndex(indexes+x,GetPixelIndex(
canvas_indexes+image->extract_info.x+x));
break;
}
case OpacityQuantum:
{
SetPixelOpacity(q,GetPixelOpacity(p));
break;
}
default:
break;
}
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case PlaneInterlace:
{
/*
Plane interlacing: CCCCCC...MMMMMM...YYYYYY...KKKKKK...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,CyanQuantum);
count=ReadBlob(image,length,pixels);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,CyanQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,6);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,MagentaQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(q,GetPixelGreen(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,6);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,YellowQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(q,GetPixelBlue(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,6);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const IndexPacket
*restrict canvas_indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlackQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
canvas_indexes=GetVirtualIndexQueue(canvas_image);
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(indexes+x,GetPixelIndex(
canvas_indexes+image->extract_info.x+x));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,6);
if (status == MagickFalse)
break;
}
if (image->matte != MagickFalse)
{
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,AlphaQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,
canvas_image->extract_info.x,0,canvas_image->columns,1,
exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelOpacity(q,GetPixelOpacity(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,6);
if (status == MagickFalse)
break;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,6,6);
if (status == MagickFalse)
break;
}
break;
}
case PartitionInterlace:
{
/*
Partition interlacing: CCCCCC..., MMMMMM..., YYYYYY..., KKKKKK...
*/
AppendImageFormat("C",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
length=GetQuantumExtent(canvas_image,quantum_info,CyanQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
if (ReadBlob(image,length,pixels) != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
count=ReadBlob(image,length,pixels);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,CyanQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("M",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,MagentaQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
if (ReadBlob(image,length,pixels) != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
count=ReadBlob(image,length,pixels);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,MagentaQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(q,GetPixelGreen(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("Y",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,YellowQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
if (ReadBlob(image,length,pixels) != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
count=ReadBlob(image,length,pixels);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,YellowQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(q,GetPixelBlue(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("K",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,BlackQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
if (ReadBlob(image,length,pixels) != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
count=ReadBlob(image,length,pixels);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const IndexPacket
*restrict canvas_indexes;
register const PixelPacket
*restrict p;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlackQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
canvas_indexes=GetVirtualIndexQueue(canvas_image);
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(indexes+x,GetPixelIndex(
canvas_indexes+image->extract_info.x+x));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,5);
if (status == MagickFalse)
break;
}
if (image->matte != MagickFalse)
{
(void) CloseBlob(image);
AppendImageFormat("A",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,AlphaQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
if (ReadBlob(image,length,pixels) != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
count=ReadBlob(image,length,pixels);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
register const PixelPacket
*restrict p;
register PixelPacket
*restrict q;
register ssize_t
x;
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,YellowQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,
0,canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (PixelPacket *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelOpacity(q,GetPixelOpacity(p));
p++;
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
count=ReadBlob(image,length,pixels);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,5);
if (status == MagickFalse)
break;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,5);
if (status == MagickFalse)
break;
}
break;
}
}
SetQuantumImageType(image,quantum_type);
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (count == (ssize_t) length)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
scene++;
} while (count == (ssize_t) length);
quantum_info=DestroyQuantumInfo(quantum_info);
InheritException(&image->exception,&canvas_image->exception);
canvas_image=DestroyImage(canvas_image);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| 168,553 |
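The fixed CMYK reader's first addition is a SetImageExtent() check before any row is decoded, and the CairoOutputDev record later in this set makes the sibling change, swapping gmalloc(row_stride * maskHeight) for gmallocn(row_stride, maskHeight). Both enforce the same discipline: never size a buffer from an unchecked width × height product. A hypothetical checked-allocation helper in the same spirit:

```c
#include <stdint.h>
#include <stdlib.h>

/* Refuse products that wrap size_t before any buffer is sized from
 * them -- the idea behind helpers like gmallocn. */
void *malloc_n(size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size)
        return NULL;                     /* count * size would overflow */
    return malloc(count * size);
}

/* e.g. a decoder allocating a 4-byte-per-pixel frame: */
unsigned char *alloc_rgba(size_t width, size_t height)
{
    if (width > SIZE_MAX / 4)
        return NULL;                     /* stride itself would wrap */
    return malloc_n(width * 4, height);  /* NULL on wrap or OOM */
}
```

Without the check, a crafted width/height pair wraps to a tiny allocation and the per-row loops then write far past the end of it — the classic CWE-189-into-CWE-119 escalation.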
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: spnego_gss_wrap(
OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
gss_buffer_t input_message_buffer,
int *conf_state,
gss_buffer_t output_message_buffer)
{
OM_uint32 ret;
ret = gss_wrap(minor_status,
context_handle,
conf_req_flag,
qop_req,
input_message_buffer,
conf_state,
output_message_buffer);
return (ret);
}
Commit Message: Fix SPNEGO context aliasing bugs [CVE-2015-2695]
The SPNEGO mechanism currently replaces its context handle with the
mechanism context handle upon establishment, under the assumption that
most GSS functions are only called after context establishment. This
assumption is incorrect, and can lead to aliasing violations for some
programs. Maintain the SPNEGO context structure after context
establishment and refer to it in all GSS methods. Add initiate and
opened flags to the SPNEGO context structure for use in
gss_inquire_context() prior to context establishment.
CVE-2015-2695:
In MIT krb5 1.5 and later, applications which call
gss_inquire_context() on a partially-established SPNEGO context can
cause the GSS-API library to read from a pointer using the wrong type,
generally causing a process crash. This bug may go unnoticed, because
the most common SPNEGO authentication scenario establishes the context
after just one call to gss_accept_sec_context(). Java server
applications using the native JGSS provider are vulnerable to this
bug. A carefully crafted SPNEGO packet might allow the
gss_inquire_context() call to succeed with attacker-determined
results, but applications should not make access control decisions
based on gss_inquire_context() results prior to context establishment.
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[[email protected]: several bugfixes, style changes, and edge-case
behavior changes; commit message and CVE description]
ticket: 8244
target_version: 1.14
tags: pullup
CWE ID: CWE-18 | spnego_gss_wrap(
OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
gss_buffer_t input_message_buffer,
int *conf_state,
gss_buffer_t output_message_buffer)
{
OM_uint32 ret;
spnego_gss_ctx_id_t sc = (spnego_gss_ctx_id_t)context_handle;
if (sc->ctx_handle == GSS_C_NO_CONTEXT)
return (GSS_S_NO_CONTEXT);
ret = gss_wrap(minor_status,
sc->ctx_handle,
conf_req_flag,
qop_req,
input_message_buffer,
conf_state,
output_message_buffer);
return (ret);
}
| 166,671 |
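The CVE-2015-2695 fix above replaces SPNEGO's type-swapping trick (overwriting its own handle with the inner mechanism's handle at establishment) with a stable wrapper context that is dereferenced on every call. The shape of that pattern, with illustrative field names:

```c
#include <gssapi/gssapi.h>

/* The handle stays a spnego_ctx_t for the context's whole lifetime;
 * the inner mechanism context lives in a field rather than replacing
 * the handle outright. */
typedef struct spnego_ctx_st {
    gss_ctx_id_t mech_ctx;   /* inner mechanism's context */
    int          opened;     /* fully established?        */
    int          initiate;   /* initiator side?           */
} *spnego_ctx_t;

OM_uint32 spnego_wrap_sketch(OM_uint32 *minor, gss_ctx_id_t handle,
                             int conf_req, gss_qop_t qop,
                             gss_buffer_t in, int *conf_state,
                             gss_buffer_t out)
{
    spnego_ctx_t sc = (spnego_ctx_t)handle;   /* always our own type */

    if (sc == NULL || sc->mech_ctx == GSS_C_NO_CONTEXT)
        return GSS_S_NO_CONTEXT;              /* not established yet */
    return gss_wrap(minor, sc->mech_ctx, conf_req, qop,
                    in, conf_state, out);
}
```

The opened/initiate flags are what let gss_inquire_context() answer safely before establishment instead of reinterpreting a half-built handle as the wrong type.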
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void NaClProcessHost::OnPpapiChannelCreated(
const IPC::ChannelHandle& channel_handle) {
DCHECK(enable_ipc_proxy_);
ReplyToRenderer(channel_handle);
}
Commit Message: Revert 143656 - Add an IPC channel between the NaCl loader process and the renderer.
BUG=116317
TEST=ppapi, nacl tests, manual testing for experimental IPC proxy.
Review URL: https://chromiumcodereview.appspot.com/10641016
[email protected]
Review URL: https://chromiumcodereview.appspot.com/10625007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@143665 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void NaClProcessHost::OnPpapiChannelCreated(
return ReplyToRenderer() && StartNaClExecution();
}
| 170,726 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void LauncherView::OnBoundsChanged(const gfx::Rect& previous_bounds) {
LayoutToIdealBounds();
FOR_EACH_OBSERVER(LauncherIconObserver, observers_,
OnLauncherIconPositionsChanged());
}
Commit Message: ash: Add launcher overflow bubble.
- Host a LauncherView in a bubble to display overflowed items;
- Mouse wheel and two-finger scroll to scroll the LauncherView in the bubble in case the overflow bubble itself overflows;
- Fit the bubble when items are added/removed;
- Keep the launcher bar on screen when the bubble is shown;
BUG=128054
TEST=Verify overflowed launcher items are shown in a bubble instead of a menu.
Review URL: https://chromiumcodereview.appspot.com/10659003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@146460 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | void LauncherView::OnBoundsChanged(const gfx::Rect& previous_bounds) {
LayoutToIdealBounds();
FOR_EACH_OBSERVER(LauncherIconObserver, observers_,
OnLauncherIconPositionsChanged());
if (IsShowingOverflowBubble())
overflow_bubble_->Hide();
}
| 170,894 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: InputMethodStatusConnection* ChromeOSMonitorInputMethodStatus(
void* language_library,
LanguageCurrentInputMethodMonitorFunction current_input_method_changed,
LanguageRegisterImePropertiesFunction register_ime_properties,
LanguageUpdateImePropertyFunction update_ime_property,
LanguageConnectionChangeMonitorFunction connection_changed) {
DLOG(INFO) << "MonitorInputMethodStatus";
return InputMethodStatusConnection::GetConnection(
language_library,
current_input_method_changed,
register_ime_properties,
update_ime_property,
connection_changed);
}
Commit Message: Remove use of libcros from InputMethodLibrary.
BUG=chromium-os:16238
TEST=Confirm that input methods work as before on the netbook. Also confirm that Chrome builds and works on the desktop as before.
Review URL: http://codereview.chromium.org/7003086
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | InputMethodStatusConnection* ChromeOSMonitorInputMethodStatus(
| 170,524 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void CairoOutputDev::drawSoftMaskedImage(GfxState *state, Object *ref, Stream *str,
int width, int height,
GfxImageColorMap *colorMap,
Stream *maskStr,
int maskWidth, int maskHeight,
GfxImageColorMap *maskColorMap)
{
ImageStream *maskImgStr;
maskImgStr = new ImageStream(maskStr, maskWidth,
maskColorMap->getNumPixelComps(),
maskColorMap->getBits());
maskImgStr->reset();
int row_stride = (maskWidth + 3) & ~3;
unsigned char *maskBuffer;
maskBuffer = (unsigned char *)gmalloc (row_stride * maskHeight);
unsigned char *maskDest;
cairo_surface_t *maskImage;
cairo_pattern_t *maskPattern;
Guchar *pix;
int y;
for (y = 0; y < maskHeight; y++) {
maskDest = (unsigned char *) (maskBuffer + y * row_stride);
pix = maskImgStr->getLine();
maskColorMap->getGrayLine (pix, maskDest, maskWidth);
}
maskImage = cairo_image_surface_create_for_data (maskBuffer, CAIRO_FORMAT_A8,
maskWidth, maskHeight, row_stride);
delete maskImgStr;
maskStr->close();
unsigned char *buffer;
unsigned int *dest;
cairo_surface_t *image;
cairo_pattern_t *pattern;
ImageStream *imgStr;
cairo_matrix_t matrix;
cairo_matrix_t maskMatrix;
int is_identity_transform;
buffer = (unsigned char *)gmalloc (width * height * 4);
/* TODO: Do we want to cache these? */
imgStr = new ImageStream(str, width,
colorMap->getNumPixelComps(),
colorMap->getBits());
imgStr->reset();
/* ICCBased color space doesn't do any color correction
* so check its underlying color space as well */
is_identity_transform = colorMap->getColorSpace()->getMode() == csDeviceRGB ||
(colorMap->getColorSpace()->getMode() == csICCBased &&
((GfxICCBasedColorSpace*)colorMap->getColorSpace())->getAlt()->getMode() == csDeviceRGB);
for (y = 0; y < height; y++) {
dest = (unsigned int *) (buffer + y * 4 * width);
pix = imgStr->getLine();
colorMap->getRGBLine (pix, dest, width);
}
image = cairo_image_surface_create_for_data (buffer, CAIRO_FORMAT_RGB24,
width, height, width * 4);
if (image == NULL) {
delete imgStr;
return;
}
pattern = cairo_pattern_create_for_surface (image);
maskPattern = cairo_pattern_create_for_surface (maskImage);
if (pattern == NULL) {
delete imgStr;
return;
}
LOG (printf ("drawSoftMaskedImage %dx%d\n", width, height));
cairo_matrix_init_translate (&matrix, 0, height);
cairo_matrix_scale (&matrix, width, -height);
cairo_matrix_init_translate (&maskMatrix, 0, maskHeight);
cairo_matrix_scale (&maskMatrix, maskWidth, -maskHeight);
cairo_pattern_set_matrix (pattern, &matrix);
cairo_pattern_set_matrix (maskPattern, &maskMatrix);
cairo_pattern_set_filter (pattern, CAIRO_FILTER_BILINEAR);
cairo_pattern_set_filter (maskPattern, CAIRO_FILTER_BILINEAR);
cairo_set_source (cairo, pattern);
cairo_mask (cairo, maskPattern);
if (cairo_shape) {
#if 0
cairo_rectangle (cairo_shape, 0., 0., width, height);
cairo_fill (cairo_shape);
#else
cairo_save (cairo_shape);
/* this should draw a rectangle the size of the image
* we use this instead of rect,fill because of the lack
* of EXTEND_PAD */
/* NOTE: this will multiply the edges of the image twice */
cairo_set_source (cairo_shape, pattern);
cairo_mask (cairo_shape, pattern);
cairo_restore (cairo_shape);
#endif
}
cairo_pattern_destroy (maskPattern);
cairo_surface_destroy (maskImage);
cairo_pattern_destroy (pattern);
cairo_surface_destroy (image);
free (buffer);
free (maskBuffer);
delete imgStr;
}
Commit Message:
CWE ID: CWE-189 | void CairoOutputDev::drawSoftMaskedImage(GfxState *state, Object *ref, Stream *str,
int width, int height,
GfxImageColorMap *colorMap,
Stream *maskStr,
int maskWidth, int maskHeight,
GfxImageColorMap *maskColorMap)
{
ImageStream *maskImgStr;
maskImgStr = new ImageStream(maskStr, maskWidth,
maskColorMap->getNumPixelComps(),
maskColorMap->getBits());
maskImgStr->reset();
int row_stride = (maskWidth + 3) & ~3;
unsigned char *maskBuffer;
maskBuffer = (unsigned char *)gmallocn (row_stride, maskHeight);
unsigned char *maskDest;
cairo_surface_t *maskImage;
cairo_pattern_t *maskPattern;
Guchar *pix;
int y;
for (y = 0; y < maskHeight; y++) {
maskDest = (unsigned char *) (maskBuffer + y * row_stride);
pix = maskImgStr->getLine();
maskColorMap->getGrayLine (pix, maskDest, maskWidth);
}
maskImage = cairo_image_surface_create_for_data (maskBuffer, CAIRO_FORMAT_A8,
maskWidth, maskHeight, row_stride);
delete maskImgStr;
maskStr->close();
unsigned char *buffer;
unsigned int *dest;
cairo_surface_t *image;
cairo_pattern_t *pattern;
ImageStream *imgStr;
cairo_matrix_t matrix;
cairo_matrix_t maskMatrix;
int is_identity_transform;
buffer = (unsigned char *)gmallocn3 (width, height, 4);
/* TODO: Do we want to cache these? */
imgStr = new ImageStream(str, width,
colorMap->getNumPixelComps(),
colorMap->getBits());
imgStr->reset();
/* ICCBased color space doesn't do any color correction
* so check its underlying color space as well */
is_identity_transform = colorMap->getColorSpace()->getMode() == csDeviceRGB ||
(colorMap->getColorSpace()->getMode() == csICCBased &&
((GfxICCBasedColorSpace*)colorMap->getColorSpace())->getAlt()->getMode() == csDeviceRGB);
for (y = 0; y < height; y++) {
dest = (unsigned int *) (buffer + y * 4 * width);
pix = imgStr->getLine();
colorMap->getRGBLine (pix, dest, width);
}
image = cairo_image_surface_create_for_data (buffer, CAIRO_FORMAT_RGB24,
width, height, width * 4);
if (image == NULL) {
delete imgStr;
return;
}
pattern = cairo_pattern_create_for_surface (image);
maskPattern = cairo_pattern_create_for_surface (maskImage);
if (pattern == NULL) {
delete imgStr;
return;
}
LOG (printf ("drawSoftMaskedImage %dx%d\n", width, height));
cairo_matrix_init_translate (&matrix, 0, height);
cairo_matrix_scale (&matrix, width, -height);
cairo_matrix_init_translate (&maskMatrix, 0, maskHeight);
cairo_matrix_scale (&maskMatrix, maskWidth, -maskHeight);
cairo_pattern_set_matrix (pattern, &matrix);
cairo_pattern_set_matrix (maskPattern, &maskMatrix);
cairo_pattern_set_filter (pattern, CAIRO_FILTER_BILINEAR);
cairo_pattern_set_filter (maskPattern, CAIRO_FILTER_BILINEAR);
cairo_set_source (cairo, pattern);
cairo_mask (cairo, maskPattern);
if (cairo_shape) {
#if 0
cairo_rectangle (cairo_shape, 0., 0., width, height);
cairo_fill (cairo_shape);
#else
cairo_save (cairo_shape);
/* this should draw a rectangle the size of the image
* we use this instead of rect,fill because of the lack
* of EXTEND_PAD */
/* NOTE: this will multiply the edges of the image twice */
cairo_set_source (cairo_shape, pattern);
cairo_mask (cairo_shape, pattern);
cairo_restore (cairo_shape);
#endif
}
cairo_pattern_destroy (maskPattern);
cairo_surface_destroy (maskImage);
cairo_pattern_destroy (pattern);
cairo_surface_destroy (image);
free (buffer);
free (maskBuffer);
delete imgStr;
}
| 164,607 |
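The change from gmalloc(width * height * 4) to gmallocn3(width, height, 4) above is the textbook CWE-189 repair: the multiplication must be overflow-checked before it reaches the allocator, otherwise a crafted width/height pair wraps to a small allocation that the per-row writes then overrun. A minimal standalone C sketch of the same pattern (checked_alloc3 is an illustrative name, not poppler's API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate a*b*c bytes, failing cleanly if the product overflows size_t.
 * Mirrors the intent of poppler's gmallocn3() used in the fix above. */
static void *checked_alloc3(size_t a, size_t b, size_t c)
{
    if (b != 0 && a > SIZE_MAX / b)
        return NULL;                /* a*b would overflow */
    size_t ab = a * b;
    if (c != 0 && ab > SIZE_MAX / c)
        return NULL;                /* (a*b)*c would overflow */
    return malloc(ab * c);
}

int main(void)
{
    /* A request whose byte count cannot be represented is rejected
     * outright instead of wrapping to a tiny allocation. */
    unsigned char *buf = checked_alloc3(SIZE_MAX / 2, 3, 4);
    printf("oversized request: %s\n", buf ? "allocated?!" : "rejected");
    free(buf);
    return 0;
}

The early NULL returns make the failure indistinguishable from ordinary allocation failure, which callers of gmalloc-style helpers already have to handle.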
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int dtls1_process_buffered_records(SSL *s)
{
pitem *item;
SSL3_BUFFER *rb;
item = pqueue_peek(s->rlayer.d->unprocessed_rcds.q);
if (item) {
/* Check if epoch is current. */
if (s->rlayer.d->unprocessed_rcds.epoch != s->rlayer.d->r_epoch)
return (1); /* Nothing to do. */
rb = RECORD_LAYER_get_rbuf(&s->rlayer);
if (rb->left > 0) {
/*
* We've still got data from the current packet to read;
* leave the buffered records where they are for now.
*/
return 1;
}
/* Process all the records. */
while (pqueue_peek(s->rlayer.d->unprocessed_rcds.q)) {
dtls1_get_unprocessed_record(s);
if (!dtls1_process_record(s))
return (0);
if (dtls1_buffer_record(s, &(s->rlayer.d->processed_rcds),
SSL3_RECORD_get_seq_num(s->rlayer.rrec)) < 0)
return -1;
}
}
/*
* here, anything else is handled by higher layers
* Application data protocol
* none of our business
*/
s->rlayer.d->processed_rcds.epoch = s->rlayer.d->r_epoch;
s->rlayer.d->unprocessed_rcds.epoch = s->rlayer.d->r_epoch + 1;
return (1);
}
Commit Message:
CWE ID: CWE-189 | int dtls1_process_buffered_records(SSL *s)
{
pitem *item;
SSL3_BUFFER *rb;
SSL3_RECORD *rr;
DTLS1_BITMAP *bitmap;
unsigned int is_next_epoch;
int replayok = 1;
item = pqueue_peek(s->rlayer.d->unprocessed_rcds.q);
if (item) {
/* Check if epoch is current. */
if (s->rlayer.d->unprocessed_rcds.epoch != s->rlayer.d->r_epoch)
return 1; /* Nothing to do. */
rr = RECORD_LAYER_get_rrec(&s->rlayer);
rb = RECORD_LAYER_get_rbuf(&s->rlayer);
if (rb->left > 0) {
/*
* We've still got data from the current packet to read;
* leave the buffered records where they are for now.
*/
return 1;
}
/* Process all the records. */
while (pqueue_peek(s->rlayer.d->unprocessed_rcds.q)) {
dtls1_get_unprocessed_record(s);
bitmap = dtls1_get_bitmap(s, rr, &is_next_epoch);
if (bitmap == NULL) {
/*
* Should not happen. This will only ever be NULL when the
* current record is from a different epoch. But that cannot
* be the case because we already checked the epoch above
*/
SSLerr(SSL_F_DTLS1_PROCESS_BUFFERED_RECORDS,
ERR_R_INTERNAL_ERROR);
return 0;
}
#ifndef OPENSSL_NO_SCTP
/* Only do replay check if no SCTP bio */
if (!BIO_dgram_is_sctp(SSL_get_rbio(s)))
#endif
{
/*
* Check whether this is a repeat, or aged record. We did this
* check once already when we first received the record - but
* we might have updated the window since then due to
* records we subsequently processed.
*/
replayok = dtls1_record_replay_check(s, bitmap);
}
if (!replayok || !dtls1_process_record(s, bitmap)) {
/* dump this record */
rr->length = 0;
RECORD_LAYER_reset_packet_length(&s->rlayer);
continue;
}
if (dtls1_buffer_record(s, &(s->rlayer.d->processed_rcds),
SSL3_RECORD_get_seq_num(s->rlayer.rrec)) < 0)
return 0;
}
}
/*
* here, anything else is handled by higher layers
* Application data protocol
* none of our business
*/
s->rlayer.d->processed_rcds.epoch = s->rlayer.d->r_epoch;
s->rlayer.d->unprocessed_rcds.epoch = s->rlayer.d->r_epoch + 1;
return 1;
}
| 165,194 |
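The replayok re-check inside the fixed loop exists because handling earlier buffered records can advance the replay window, so a record that looked fresh when it was queued may be a replay by the time it is processed. The window itself is conventionally a bitmask over recent sequence numbers; a distilled sketch of that structure (not OpenSSL's DTLS1_BITMAP, but the same idea):

#include <stdint.h>
#include <stdio.h>

/* A 64-entry sliding replay window, the structure behind checks like
 * dtls1_record_replay_check(). Returns 1 if seq is fresh, 0 if it is a
 * replay or too old; fresh sequence numbers update the window. */
struct replay_win { uint64_t top; uint64_t bits; };

static int replay_check_update(struct replay_win *w, uint64_t seq)
{
    if (seq > w->top) {             /* advances the window */
        uint64_t shift = seq - w->top;
        w->bits = (shift >= 64) ? 0 : w->bits << shift;
        w->bits |= 1;               /* mark seq itself as seen */
        w->top = seq;
        return 1;
    }
    uint64_t off = w->top - seq;
    if (off >= 64)
        return 0;                   /* older than the window: reject */
    if (w->bits & ((uint64_t)1 << off))
        return 0;                   /* already seen: replay */
    w->bits |= (uint64_t)1 << off;
    return 1;
}

int main(void)
{
    struct replay_win w = {0, 1};   /* seq 0 counted as seen */
    printf("%d\n", replay_check_update(&w, 5));  /* fresh: 1 */
    printf("%d\n", replay_check_update(&w, 5));  /* replay: 0 */
    printf("%d\n", replay_check_update(&w, 3));  /* within window, fresh: 1 */
    return 0;
}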
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: PassRefPtr<RTCSessionDescriptionDescriptor> RTCPeerConnectionHandlerChromium::localDescription()
{
if (!m_webHandler)
return 0;
return m_webHandler->localDescription();
}
Commit Message: Unreviewed, rolling out r127612, r127660, and r127664.
http://trac.webkit.org/changeset/127612
http://trac.webkit.org/changeset/127660
http://trac.webkit.org/changeset/127664
https://bugs.webkit.org/show_bug.cgi?id=95920
Source/Platform:
* Platform.gypi:
* chromium/public/WebRTCPeerConnectionHandler.h:
(WebKit):
(WebRTCPeerConnectionHandler):
* chromium/public/WebRTCVoidRequest.h: Removed.
Source/WebCore:
* CMakeLists.txt:
* GNUmakefile.list.am:
* Modules/mediastream/RTCErrorCallback.h:
(WebCore):
(RTCErrorCallback):
* Modules/mediastream/RTCErrorCallback.idl:
* Modules/mediastream/RTCPeerConnection.cpp:
(WebCore::RTCPeerConnection::createOffer):
* Modules/mediastream/RTCPeerConnection.h:
(WebCore):
(RTCPeerConnection):
* Modules/mediastream/RTCPeerConnection.idl:
* Modules/mediastream/RTCSessionDescriptionCallback.h:
(WebCore):
(RTCSessionDescriptionCallback):
* Modules/mediastream/RTCSessionDescriptionCallback.idl:
* Modules/mediastream/RTCSessionDescriptionRequestImpl.cpp:
(WebCore::RTCSessionDescriptionRequestImpl::create):
(WebCore::RTCSessionDescriptionRequestImpl::RTCSessionDescriptionRequestImpl):
(WebCore::RTCSessionDescriptionRequestImpl::requestSucceeded):
(WebCore::RTCSessionDescriptionRequestImpl::requestFailed):
(WebCore::RTCSessionDescriptionRequestImpl::clear):
* Modules/mediastream/RTCSessionDescriptionRequestImpl.h:
(RTCSessionDescriptionRequestImpl):
* Modules/mediastream/RTCVoidRequestImpl.cpp: Removed.
* Modules/mediastream/RTCVoidRequestImpl.h: Removed.
* WebCore.gypi:
* platform/chromium/support/WebRTCVoidRequest.cpp: Removed.
* platform/mediastream/RTCPeerConnectionHandler.cpp:
(RTCPeerConnectionHandlerDummy):
(WebCore::RTCPeerConnectionHandlerDummy::RTCPeerConnectionHandlerDummy):
* platform/mediastream/RTCPeerConnectionHandler.h:
(WebCore):
(WebCore::RTCPeerConnectionHandler::~RTCPeerConnectionHandler):
(RTCPeerConnectionHandler):
(WebCore::RTCPeerConnectionHandler::RTCPeerConnectionHandler):
* platform/mediastream/RTCVoidRequest.h: Removed.
* platform/mediastream/chromium/RTCPeerConnectionHandlerChromium.cpp:
* platform/mediastream/chromium/RTCPeerConnectionHandlerChromium.h:
(RTCPeerConnectionHandlerChromium):
Tools:
* DumpRenderTree/chromium/MockWebRTCPeerConnectionHandler.cpp:
(MockWebRTCPeerConnectionHandler::SuccessCallbackTask::SuccessCallbackTask):
(MockWebRTCPeerConnectionHandler::SuccessCallbackTask::runIfValid):
(MockWebRTCPeerConnectionHandler::FailureCallbackTask::FailureCallbackTask):
(MockWebRTCPeerConnectionHandler::FailureCallbackTask::runIfValid):
(MockWebRTCPeerConnectionHandler::createOffer):
* DumpRenderTree/chromium/MockWebRTCPeerConnectionHandler.h:
(MockWebRTCPeerConnectionHandler):
(SuccessCallbackTask):
(FailureCallbackTask):
LayoutTests:
* fast/mediastream/RTCPeerConnection-createOffer.html:
* fast/mediastream/RTCPeerConnection-localDescription-expected.txt: Removed.
* fast/mediastream/RTCPeerConnection-localDescription.html: Removed.
* fast/mediastream/RTCPeerConnection-remoteDescription-expected.txt: Removed.
* fast/mediastream/RTCPeerConnection-remoteDescription.html: Removed.
git-svn-id: svn://svn.chromium.org/blink/trunk@127679 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-20 | PassRefPtr<RTCSessionDescriptionDescriptor> RTCPeerConnectionHandlerChromium::localDescription()
| 170,352 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: decode_rt_routing_info(netdissect_options *ndo,
const u_char *pptr, char *buf, u_int buflen)
{
uint8_t route_target[8];
u_int plen;
ND_TCHECK(pptr[0]);
plen = pptr[0]; /* get prefix length */
if (0 == plen) {
snprintf(buf, buflen, "default route target");
return 1;
}
if (32 > plen)
return -1;
plen-=32; /* adjust prefix length */
if (64 < plen)
return -1;
memset(&route_target, 0, sizeof(route_target));
ND_TCHECK2(pptr[1], (plen + 7) / 8);
memcpy(&route_target, &pptr[1], (plen + 7) / 8);
if (plen % 8) {
((u_char *)&route_target)[(plen + 7) / 8 - 1] &=
((0xff00 >> (plen % 8)) & 0xff);
}
snprintf(buf, buflen, "origin AS: %s, route target %s",
as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(pptr+1)),
bgp_vpn_rd_print(ndo, (u_char *)&route_target));
return 5 + (plen + 7) / 8;
trunc:
return -2;
}
Commit Message: CVE-2017-13053/BGP: fix VPN route target bounds checks
decode_rt_routing_info() didn't check bounds before fetching 4 octets of
the origin AS field and could over-read the input buffer, put it right.
It also fetched the varying number of octets of the route target field
from 4 octets lower than the correct offset, put it right.
It also used the same temporary buffer explicitly through as_printf()
and implicitly through bgp_vpn_rd_print() so the end result of snprintf()
was not what was originally intended.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s).
CWE ID: CWE-125 | decode_rt_routing_info(netdissect_options *ndo,
const u_char *pptr, char *buf, u_int buflen)
{
uint8_t route_target[8];
u_int plen;
char asbuf[sizeof(astostr)]; /* bgp_vpn_rd_print() overwrites astostr */
/* NLRI "prefix length" from RFC 2858 Section 4. */
ND_TCHECK(pptr[0]);
plen = pptr[0]; /* get prefix length */
/* NLRI "prefix" (ibid), valid lengths are { 0, 32, 33, ..., 96 } bits.
* RFC 4684 Section 4 defines the layout of "origin AS" and "route
* target" fields inside the "prefix" depending on its length.
*/
if (0 == plen) {
/* Without "origin AS", without "route target". */
snprintf(buf, buflen, "default route target");
return 1;
}
if (32 > plen)
return -1;
/* With at least "origin AS", possibly with "route target". */
ND_TCHECK_32BITS(pptr + 1);
as_printf(ndo, asbuf, sizeof(asbuf), EXTRACT_32BITS(pptr + 1));
plen-=32; /* adjust prefix length */
if (64 < plen)
return -1;
/* From now on (plen + 7) / 8 evaluates to { 0, 1, 2, ..., 8 }
* and gives the number of octets in the variable-length "route
* target" field inside this NLRI "prefix". Look for it.
*/
memset(&route_target, 0, sizeof(route_target));
ND_TCHECK2(pptr[5], (plen + 7) / 8);
memcpy(&route_target, &pptr[5], (plen + 7) / 8);
/* Which specification says to do this? */
if (plen % 8) {
((u_char *)&route_target)[(plen + 7) / 8 - 1] &=
((0xff00 >> (plen % 8)) & 0xff);
}
snprintf(buf, buflen, "origin AS: %s, route target %s",
asbuf,
bgp_vpn_rd_print(ndo, (u_char *)&route_target));
return 5 + (plen + 7) / 8;
trunc:
return -2;
}
| 167,820 |
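The fixed decode_rt_routing_info() shows two defensive-parsing rules at work: bounds-check every fetch against the captured length before reading, and derive the offset of a variable-length field from the spec rather than reusing a stale constant (the route target starts after the 4-octet origin AS, at offset 5, not 1). A self-contained sketch of the same shape for the RFC 4684 layout described above (parse_rt_nlri and its return convention are mine, not tcpdump's API):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Parse "1-octet bit length, 4-octet origin AS, variable route target"
 * without ever reading past caplen. Returns bytes consumed, 1 for the
 * zero-length default route target, or -1 on truncated/malformed input. */
static int parse_rt_nlri(const uint8_t *p, size_t caplen, uint32_t *as_out)
{
    if (caplen < 1)
        return -1;
    unsigned plen = p[0];               /* prefix length in bits */
    if (plen == 0)
        return 1;                       /* default route target */
    if (plen < 32 || plen > 96)
        return -1;                      /* valid lengths: 0 or 32..96 */
    if (caplen < 5)
        return -1;                      /* bounds check BEFORE the AS read */
    *as_out = ((uint32_t)p[1] << 24) | ((uint32_t)p[2] << 16) |
              ((uint32_t)p[3] << 8) | (uint32_t)p[4];
    unsigned rt_bytes = (plen - 32 + 7) / 8;   /* 0..8 octets follow */
    if (caplen < 5 + (size_t)rt_bytes)
        return -1;                      /* and before the route target read */
    uint8_t route_target[8] = {0};
    memcpy(route_target, p + 5, rt_bytes);     /* offset 5, after the AS */
    (void)route_target;
    return 5 + (int)rt_bytes;
}

int main(void)
{
    const uint8_t pkt[] = {96, 0, 0, 0, 100, 1, 2, 3, 4, 5, 6, 7, 8};
    uint32_t as = 0;
    int n = parse_rt_nlri(pkt, sizeof(pkt), &as);
    printf("consumed=%d origin AS=%u\n", n, (unsigned)as);
    return 0;
}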
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: OmniboxPopupViewGtk::~OmniboxPopupViewGtk() {
model_.reset();
g_object_unref(layout_);
gtk_widget_destroy(window_);
for (ImageMap::iterator it = images_.begin(); it != images_.end(); ++it)
delete it->second;
}
Commit Message: GTK: Stop listening to gtk signals in the omnibox before destroying the model.
BUG=123530
TEST=none
Review URL: http://codereview.chromium.org/10103012
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132498 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | OmniboxPopupViewGtk::~OmniboxPopupViewGtk() {
// Stop listening to our signals before we destroy the model. I suspect that
// we can race window destruction, otherwise.
signal_registrar_.reset();
model_.reset();
g_object_unref(layout_);
gtk_widget_destroy(window_);
for (ImageMap::iterator it = images_.begin(); it != images_.end(); ++it)
delete it->second;
}
| 171,049 |
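The one-line fix works because destruction order is the whole game with observer registrations: the callbacks must be disconnected before the object they reach into is freed, or a late signal dereferences freed memory. A distilled C sketch of that ordering (a single global callback stands in for GTK's signal table; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct model { int value; };
typedef void (*observer_fn)(struct model *);

static observer_fn g_observer;      /* stands in for GTK's signal table */
static struct model *g_observer_data;

static void on_change(struct model *m)
{
    printf("model value: %d\n", m->value);
}

static void emit_change(void)       /* a signal firing */
{
    if (g_observer != NULL)
        g_observer(g_observer_data);
}

int main(void)
{
    struct model *m = malloc(sizeof(*m));
    m->value = 42;
    g_observer = on_change;
    g_observer_data = m;
    emit_change();                  /* normal operation */

    /* The fix's ordering: drop the registrations first... */
    g_observer = NULL;
    g_observer_data = NULL;
    /* ...and only then destroy the model, so a late signal cannot
     * reach into freed memory. */
    free(m);
    emit_change();                  /* now safely a no-op */
    return 0;
}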
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int parse_report(transport_smart *transport, git_push *push)
{
git_pkt *pkt = NULL;
const char *line_end = NULL;
gitno_buffer *buf = &transport->buffer;
int error, recvd;
git_buf data_pkt_buf = GIT_BUF_INIT;
for (;;) {
if (buf->offset > 0)
error = git_pkt_parse_line(&pkt, buf->data,
&line_end, buf->offset);
else
error = GIT_EBUFS;
if (error < 0 && error != GIT_EBUFS) {
error = -1;
goto done;
}
if (error == GIT_EBUFS) {
if ((recvd = gitno_recv(buf)) < 0) {
error = recvd;
goto done;
}
if (recvd == 0) {
giterr_set(GITERR_NET, "early EOF");
error = GIT_EEOF;
goto done;
}
continue;
}
gitno_consume(buf, line_end);
error = 0;
if (pkt == NULL)
continue;
switch (pkt->type) {
case GIT_PKT_DATA:
/* This is a sideband packet which contains other packets */
error = add_push_report_sideband_pkt(push, (git_pkt_data *)pkt, &data_pkt_buf);
break;
case GIT_PKT_ERR:
giterr_set(GITERR_NET, "report-status: Error reported: %s",
((git_pkt_err *)pkt)->error);
error = -1;
break;
case GIT_PKT_PROGRESS:
if (transport->progress_cb) {
git_pkt_progress *p = (git_pkt_progress *) pkt;
error = transport->progress_cb(p->data, p->len, transport->message_cb_payload);
}
break;
default:
error = add_push_report_pkt(push, pkt);
break;
}
git_pkt_free(pkt);
/* add_push_report_pkt returns GIT_ITEROVER when it receives a flush */
if (error == GIT_ITEROVER) {
error = 0;
if (data_pkt_buf.size > 0) {
/* If there was data remaining in the pack data buffer,
* then the server sent a partial pkt-line */
giterr_set(GITERR_NET, "Incomplete pack data pkt-line");
error = GIT_ERROR;
}
goto done;
}
if (error < 0) {
goto done;
}
}
done:
git_buf_free(&data_pkt_buf);
return error;
}
Commit Message: smart_pkt: treat empty packet lines as error
The Git protocol does not specify what should happen in the case
of an empty packet line (that is a packet line "0004"). We
currently indicate success, but do not return a packet in the
case where we hit an empty line. The smart protocol was not
prepared to handle such packets in all cases, though, resulting
in a `NULL` pointer dereference.
Fix the issue by returning an error instead. As such kind of
packets is not even specified by upstream, this is the right
thing to do.
CWE ID: CWE-476 | static int parse_report(transport_smart *transport, git_push *push)
{
git_pkt *pkt = NULL;
const char *line_end = NULL;
gitno_buffer *buf = &transport->buffer;
int error, recvd;
git_buf data_pkt_buf = GIT_BUF_INIT;
for (;;) {
if (buf->offset > 0)
error = git_pkt_parse_line(&pkt, buf->data,
&line_end, buf->offset);
else
error = GIT_EBUFS;
if (error < 0 && error != GIT_EBUFS) {
error = -1;
goto done;
}
if (error == GIT_EBUFS) {
if ((recvd = gitno_recv(buf)) < 0) {
error = recvd;
goto done;
}
if (recvd == 0) {
giterr_set(GITERR_NET, "early EOF");
error = GIT_EEOF;
goto done;
}
continue;
}
gitno_consume(buf, line_end);
error = 0;
switch (pkt->type) {
case GIT_PKT_DATA:
/* This is a sideband packet which contains other packets */
error = add_push_report_sideband_pkt(push, (git_pkt_data *)pkt, &data_pkt_buf);
break;
case GIT_PKT_ERR:
giterr_set(GITERR_NET, "report-status: Error reported: %s",
((git_pkt_err *)pkt)->error);
error = -1;
break;
case GIT_PKT_PROGRESS:
if (transport->progress_cb) {
git_pkt_progress *p = (git_pkt_progress *) pkt;
error = transport->progress_cb(p->data, p->len, transport->message_cb_payload);
}
break;
default:
error = add_push_report_pkt(push, pkt);
break;
}
git_pkt_free(pkt);
/* add_push_report_pkt returns GIT_ITEROVER when it receives a flush */
if (error == GIT_ITEROVER) {
error = 0;
if (data_pkt_buf.size > 0) {
/* If there was data remaining in the pack data buffer,
* then the server sent a partial pkt-line */
giterr_set(GITERR_NET, "Incomplete pack data pkt-line");
error = GIT_ERROR;
}
goto done;
}
if (error < 0) {
goto done;
}
}
done:
git_buf_free(&data_pkt_buf);
return error;
}
| 168,529 |
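The CWE-476 here came from a parser that could report success while producing no packet: an empty "0004" pkt-line left the output untouched, and parse_report()'s NULL guard was eventually dropped. The upstream fix makes the parser reject such lines outright, which is the pattern worth copying: never let "success with nothing parsed" escape a parsing API. A standalone sketch of a pkt-line length reader with that rule (pkt_length is a made-up helper, not libgit2's API; Git emits lowercase hex):

#include <stdio.h>
#include <string.h>

/* Read a 4-hex-digit pkt-line length header as used by the Git wire
 * protocol. Returns the payload length, 0 for a flush-pkt ("0000"), or
 * -1 on anything malformed - including the empty line "0004", which the
 * upstream fix turned from "success, no packet" into a hard error. */
static int pkt_length(const char *line, size_t len)
{
    if (len < 4)
        return -1;
    int total = 0;
    for (int i = 0; i < 4; i++) {
        int c = line[i], v;
        if (c >= '0' && c <= '9')
            v = c - '0';
        else if (c >= 'a' && c <= 'f')
            v = c - 'a' + 10;
        else
            return -1;
        total = (total << 4) | v;
    }
    if (total == 0)
        return 0;                   /* flush-pkt */
    if (total <= 4)
        return -1;                  /* "0001".."0003" and empty "0004" */
    return total - 4;               /* payload bytes that follow */
}

int main(void)
{
    const char *samples[] = { "0000", "0004", "0008abcd", "zzzz" };
    for (int i = 0; i < 4; i++)
        printf("%-8s -> %d\n", samples[i],
               pkt_length(samples[i], strlen(samples[i])));
    return 0;
}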
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: virtual void SetUp() {
const tuple<int, int, VarianceFunctionType>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
rnd(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
src_ = new uint8_t[block_size_];
ref_ = new uint8_t[block_size_];
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | virtual void SetUp() {
const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
log2width_ = get<0>(params);
width_ = 1 << log2width_;
log2height_ = get<1>(params);
height_ = 1 << log2height_;
variance_ = get<2>(params);
if (get<3>(params)) {
bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
use_high_bit_depth_ = true;
} else {
bit_depth_ = VPX_BITS_8;
use_high_bit_depth_ = false;
}
mask_ = (1 << bit_depth_) - 1;
rnd_.Reset(ACMRandom::DeterministicSeed());
block_size_ = width_ * height_;
if (!use_high_bit_depth_) {
src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
ref_ = new uint8_t[block_size_ * 2];
#if CONFIG_VP9_HIGHBITDEPTH
} else {
src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
ASSERT_TRUE(src_ != NULL);
ASSERT_TRUE(ref_ != NULL);
}
| 174,589 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: SpeechRecognitionManagerImpl::SpeechRecognitionManagerImpl(
media::AudioSystem* audio_system,
MediaStreamManager* media_stream_manager)
: audio_system_(audio_system),
media_stream_manager_(media_stream_manager),
primary_session_id_(kSessionIDInvalid),
last_session_id_(kSessionIDInvalid),
is_dispatching_event_(false),
delegate_(GetContentClient()
->browser()
->CreateSpeechRecognitionManagerDelegate()),
weak_factory_(this) {
DCHECK(!g_speech_recognition_manager_impl);
g_speech_recognition_manager_impl = this;
frame_deletion_observer_.reset(new FrameDeletionObserver(
base::BindRepeating(&SpeechRecognitionManagerImpl::AbortSessionImpl,
weak_factory_.GetWeakPtr())));
}
Commit Message: Make MediaStreamDispatcherHost per-request instead of per-frame.
Instead of having RenderFrameHost own a single MSDH to handle all
requests from a frame, MSDH objects will be owned by a strong binding.
A consequence of this is that an additional requester ID is added to
requests to MediaStreamManager, so that an MSDH is able to cancel only
requests generated by it.
In practice, MSDH will continue to be per frame in most cases since
each frame normally makes a single request for an MSDH object.
This fixes a lifetime issue caused by the IO thread executing tasks
after the RenderFrameHost dies.
Drive-by: Fix some minor lint issues.
Bug: 912520
Change-Id: I52742ffc98b9fc57ce8e6f5093a61aed86d3e516
Reviewed-on: https://chromium-review.googlesource.com/c/1369799
Reviewed-by: Emircan Uysaler <[email protected]>
Reviewed-by: Ken Buchanan <[email protected]>
Reviewed-by: Olga Sharonova <[email protected]>
Commit-Queue: Guido Urdaneta <[email protected]>
Cr-Commit-Position: refs/heads/master@{#616347}
CWE ID: CWE-189 | SpeechRecognitionManagerImpl::SpeechRecognitionManagerImpl(
media::AudioSystem* audio_system,
MediaStreamManager* media_stream_manager)
: audio_system_(audio_system),
media_stream_manager_(media_stream_manager),
primary_session_id_(kSessionIDInvalid),
last_session_id_(kSessionIDInvalid),
is_dispatching_event_(false),
delegate_(GetContentClient()
->browser()
->CreateSpeechRecognitionManagerDelegate()),
requester_id_(next_requester_id_++),
weak_factory_(this) {
DCHECK(!g_speech_recognition_manager_impl);
g_speech_recognition_manager_impl = this;
frame_deletion_observer_.reset(new FrameDeletionObserver(
base::BindRepeating(&SpeechRecognitionManagerImpl::AbortSessionImpl,
weak_factory_.GetWeakPtr())));
}
| 173,111 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int mount_entry(const char *fsname, const char *target,
const char *fstype, unsigned long mountflags,
const char *data, int optional)
{
#ifdef HAVE_STATVFS
struct statvfs sb;
#endif
if (mount(fsname, target, fstype, mountflags & ~MS_REMOUNT, data)) {
if (optional) {
INFO("failed to mount '%s' on '%s' (optional): %s", fsname,
target, strerror(errno));
return 0;
}
else {
SYSERROR("failed to mount '%s' on '%s'", fsname, target);
return -1;
}
}
if ((mountflags & MS_REMOUNT) || (mountflags & MS_BIND)) {
DEBUG("remounting %s on %s to respect bind or remount options",
fsname ? fsname : "(none)", target ? target : "(none)");
unsigned long rqd_flags = 0;
if (mountflags & MS_RDONLY)
rqd_flags |= MS_RDONLY;
#ifdef HAVE_STATVFS
if (statvfs(fsname, &sb) == 0) {
unsigned long required_flags = rqd_flags;
if (sb.f_flag & MS_NOSUID)
required_flags |= MS_NOSUID;
if (sb.f_flag & MS_NODEV)
required_flags |= MS_NODEV;
if (sb.f_flag & MS_RDONLY)
required_flags |= MS_RDONLY;
if (sb.f_flag & MS_NOEXEC)
required_flags |= MS_NOEXEC;
DEBUG("(at remount) flags for %s was %lu, required extra flags are %lu", fsname, sb.f_flag, required_flags);
/*
* If this was a bind mount request, and required_flags
* does not have any flags which are not already in
* mountflags, then skip the remount
*/
if (!(mountflags & MS_REMOUNT)) {
if (!(required_flags & ~mountflags) && rqd_flags == 0) {
DEBUG("mountflags already was %lu, skipping remount",
mountflags);
goto skipremount;
}
}
mountflags |= required_flags;
}
#endif
if (mount(fsname, target, fstype,
mountflags | MS_REMOUNT, data)) {
if (optional) {
INFO("failed to mount '%s' on '%s' (optional): %s",
fsname, target, strerror(errno));
return 0;
}
else {
SYSERROR("failed to mount '%s' on '%s'",
fsname, target);
return -1;
}
}
}
#ifdef HAVE_STATVFS
skipremount:
#endif
DEBUG("mounted '%s' on '%s', type '%s'", fsname, target, fstype);
return 0;
}
Commit Message: CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's inital fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]>
CWE ID: CWE-59 | static int mount_entry(const char *fsname, const char *target,
const char *fstype, unsigned long mountflags,
const char *data, int optional, const char *rootfs)
{
#ifdef HAVE_STATVFS
struct statvfs sb;
#endif
if (safe_mount(fsname, target, fstype, mountflags & ~MS_REMOUNT, data, rootfs)) {
if (optional) {
INFO("failed to mount '%s' on '%s' (optional): %s", fsname,
target, strerror(errno));
return 0;
}
else {
SYSERROR("failed to mount '%s' on '%s'", fsname, target);
return -1;
}
}
if ((mountflags & MS_REMOUNT) || (mountflags & MS_BIND)) {
DEBUG("remounting %s on %s to respect bind or remount options",
fsname ? fsname : "(none)", target ? target : "(none)");
unsigned long rqd_flags = 0;
if (mountflags & MS_RDONLY)
rqd_flags |= MS_RDONLY;
#ifdef HAVE_STATVFS
if (statvfs(fsname, &sb) == 0) {
unsigned long required_flags = rqd_flags;
if (sb.f_flag & MS_NOSUID)
required_flags |= MS_NOSUID;
if (sb.f_flag & MS_NODEV)
required_flags |= MS_NODEV;
if (sb.f_flag & MS_RDONLY)
required_flags |= MS_RDONLY;
if (sb.f_flag & MS_NOEXEC)
required_flags |= MS_NOEXEC;
DEBUG("(at remount) flags for %s was %lu, required extra flags are %lu", fsname, sb.f_flag, required_flags);
/*
* If this was a bind mount request, and required_flags
* does not have any flags which are not already in
* mountflags, then skip the remount
*/
if (!(mountflags & MS_REMOUNT)) {
if (!(required_flags & ~mountflags) && rqd_flags == 0) {
DEBUG("mountflags already was %lu, skipping remount",
mountflags);
goto skipremount;
}
}
mountflags |= required_flags;
}
#endif
if (mount(fsname, target, fstype,
mountflags | MS_REMOUNT, data) < 0) {
if (optional) {
INFO("failed to mount '%s' on '%s' (optional): %s",
fsname, target, strerror(errno));
return 0;
}
else {
SYSERROR("failed to mount '%s' on '%s'",
fsname, target);
return -1;
}
}
}
#ifdef HAVE_STATVFS
skipremount:
#endif
DEBUG("mounted '%s' on '%s', type '%s'", fsname, target, fstype);
return 0;
}
| 166,715 |
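As the commit message outlines, safe_mount() defeats symlink diversion by resolving the mount target one component at a time with openat(..., O_NOFOLLOW) and then mounting onto /proc/self/fd/<fd>, so no attacker-placed link is ever followed. A Linux-only sketch of that traversal (open_nofollow is illustrative; production code must also handle non-directory final components and absolute paths explicitly):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Open `path` relative to dirfd, refusing to follow any symlink along
 * the way: each component is opened with O_NOFOLLOW, so a link anywhere
 * in the path fails with ELOOP. A mount would then target
 * /proc/self/fd/<returned fd> instead of the raw path. */
static int open_nofollow(int dirfd, const char *path)
{
    char buf[4096];
    if (strlen(path) >= sizeof(buf))
        return -1;
    strcpy(buf, path);
    int fd = dup(dirfd);
    if (fd < 0)
        return -1;
    char *save = NULL;
    for (char *p = buf;; p = NULL) {
        char *tok = strtok_r(p, "/", &save);
        if (tok == NULL)
            break;                  /* resolved every component */
        int next = openat(fd, tok, O_RDONLY | O_NOFOLLOW | O_DIRECTORY);
        close(fd);
        if (next < 0)
            return -1;              /* symlink, missing, or not a dir */
        fd = next;
    }
    return fd;
}

int main(void)
{
    int root = open("/", O_RDONLY | O_DIRECTORY);
    int fd = open_nofollow(root, "proc/sys");
    if (fd >= 0) {
        char target[64];
        snprintf(target, sizeof(target), "/proc/self/fd/%d", fd);
        printf("safe mount target: %s\n", target);  /* symlink-proof */
        close(fd);
    }
    close(root);
    return 0;
}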
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: const BlockEntry* Segment::GetBlock(
const CuePoint& cp,
const CuePoint::TrackPosition& tp)
{
Cluster** const ii = m_clusters;
Cluster** i = ii;
const long count = m_clusterCount + m_clusterPreloadCount;
Cluster** const jj = ii + count;
Cluster** j = jj;
while (i < j)
{
Cluster** const k = i + (j - i) / 2;
assert(k < jj);
Cluster* const pCluster = *k;
assert(pCluster);
const long long pos = pCluster->GetPosition();
assert(pos >= 0);
if (pos < tp.m_pos)
i = k + 1;
else if (pos > tp.m_pos)
j = k;
else
return pCluster->GetEntry(cp, tp);
}
assert(i == j);
Cluster* const pCluster = Cluster::Create(this, -1, tp.m_pos); //, -1);
assert(pCluster);
const ptrdiff_t idx = i - m_clusters;
PreloadCluster(pCluster, idx);
assert(m_clusters);
assert(m_clusterPreloadCount > 0);
assert(m_clusters[idx] == pCluster);
return pCluster->GetEntry(cp, tp);
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | const BlockEntry* Segment::GetBlock(
const long count = m_clusterCount + m_clusterPreloadCount;
Cluster** const jj = ii + count;
Cluster** j = jj;
while (i < j) {
// INVARIANT:
//[ii, i) < pTP->m_pos
//[i, j) ?
//[j, jj) > pTP->m_pos
Cluster** const k = i + (j - i) / 2;
assert(k < jj);
Cluster* const pCluster = *k;
assert(pCluster);
// const long long pos_ = pCluster->m_pos;
// assert(pos_);
// const long long pos = pos_ * ((pos_ < 0) ? -1 : 1);
const long long pos = pCluster->GetPosition();
assert(pos >= 0);
if (pos < tp.m_pos)
i = k + 1;
else if (pos > tp.m_pos)
j = k;
else
return pCluster->GetEntry(cp, tp);
}
assert(i == j);
// assert(Cluster::HasBlockEntries(this, tp.m_pos));
Cluster* const pCluster = Cluster::Create(this, -1, tp.m_pos); //, -1);
assert(pCluster);
const ptrdiff_t idx = i - m_clusters;
PreloadCluster(pCluster, idx);
assert(m_clusters);
assert(m_clusterPreloadCount > 0);
assert(m_clusters[idx] == pCluster);
return pCluster->GetEntry(cp, tp);
}
| 174,288 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
struct desc_struct *desc;
short sel;
sel = get_segment_selector(regs, seg_reg_idx);
if (sel < 0)
return -1L;
if (v8086_mode(regs))
/*
* Base is simply the segment selector shifted 4
* bits to the right.
*/
return (unsigned long)(sel << 4);
if (user_64bit_mode(regs)) {
/*
* Only FS or GS will have a base address, the rest of
* the segments' bases are forced to 0.
*/
unsigned long base;
if (seg_reg_idx == INAT_SEG_REG_FS)
rdmsrl(MSR_FS_BASE, base);
else if (seg_reg_idx == INAT_SEG_REG_GS)
/*
* swapgs was called at the kernel entry point. Thus,
* MSR_KERNEL_GS_BASE will have the user-space GS base.
*/
rdmsrl(MSR_KERNEL_GS_BASE, base);
else
base = 0;
return base;
}
/* In protected mode the segment selector cannot be null. */
if (!sel)
return -1L;
desc = get_desc(sel);
if (!desc)
return -1L;
return get_desc_base(desc);
}
Commit Message: x86/insn-eval: Fix use-after-free access to LDT entry
get_desc() computes a pointer into the LDT while holding a lock that
protects the LDT from being freed, but then drops the lock and returns the
(now potentially dangling) pointer to its caller.
Fix it by giving the caller a copy of the LDT entry instead.
Fixes: 670f928ba09b ("x86/insn-eval: Add utility function to get segment descriptor")
Cc: [email protected]
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-362 | unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
struct desc_struct desc;
short sel;
sel = get_segment_selector(regs, seg_reg_idx);
if (sel < 0)
return -1L;
if (v8086_mode(regs))
/*
* Base is simply the segment selector shifted 4
* bits to the right.
*/
return (unsigned long)(sel << 4);
if (user_64bit_mode(regs)) {
/*
* Only FS or GS will have a base address, the rest of
* the segments' bases are forced to 0.
*/
unsigned long base;
if (seg_reg_idx == INAT_SEG_REG_FS)
rdmsrl(MSR_FS_BASE, base);
else if (seg_reg_idx == INAT_SEG_REG_GS)
/*
* swapgs was called at the kernel entry point. Thus,
* MSR_KERNEL_GS_BASE will have the user-space GS base.
*/
rdmsrl(MSR_KERNEL_GS_BASE, base);
else
base = 0;
return base;
}
/* In protected mode the segment selector cannot be null. */
if (!sel)
return -1L;
if (!get_desc(&desc, sel))
return -1L;
return get_desc_base(&desc);
}
| 169,610 |
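The fix converts get_desc() from pointer-returning to copy-out: the descriptor is snapshotted into a caller-provided buffer while the LDT lock is held, so nothing dangles after the lock drops. The same copy-out-under-lock pattern in portable C with pthreads (build with -pthread; all names are mine):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct desc { unsigned base; unsigned limit; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc *table;          /* may be freed/replaced concurrently */
static size_t table_len;

/* The buggy shape returned &table[i] and dropped the lock, leaving the
 * caller with a pointer into memory another thread may free. The fixed
 * shape copies the entry out while the lock is held. */
static int get_desc_copy(size_t i, struct desc *out)
{
    int ok = 0;
    pthread_mutex_lock(&table_lock);
    if (table != NULL && i < table_len) {
        *out = table[i];            /* snapshot under the lock */
        ok = 1;
    }
    pthread_mutex_unlock(&table_lock);
    return ok;                      /* caller owns a stable copy */
}

int main(void)
{
    table_len = 4;
    table = calloc(table_len, sizeof(*table));
    table[2].base = 0x1000;
    struct desc d;
    if (get_desc_copy(2, &d))
        printf("base=%#x\n", d.base);
    free(table);
    return 0;
}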
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void sctp_generate_timeout_event(struct sctp_association *asoc,
sctp_event_timeout_t timeout_type)
{
struct net *net = sock_net(asoc->base.sk);
int error = 0;
bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy: timer %d\n", __func__,
timeout_type);
/* Try again later. */
if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
sctp_association_hold(asoc);
goto out_unlock;
}
/* Is this association really dead and just waiting around for
* the timer to let go of the reference?
*/
if (asoc->base.dead)
goto out_unlock;
/* Run through the state machine. */
error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
SCTP_ST_TIMEOUT(timeout_type),
asoc->state, asoc->ep, asoc,
(void *)timeout_type, GFP_ATOMIC);
if (error)
asoc->base.sk->sk_err = -error;
out_unlock:
bh_unlock_sock(asoc->base.sk);
sctp_association_put(asoc);
}
Commit Message: sctp: Prevent soft lockup when sctp_accept() is called during a timeout event
A case can occur when sctp_accept() is called by the user during
a heartbeat timeout event after the 4-way handshake. Since
sctp_assoc_migrate() changes both assoc->base.sk and assoc->ep, the
bh_sock_lock in sctp_generate_heartbeat_event() will be taken with
the listening socket but released with the new association socket.
The result is a deadlock on any future attempts to take the listening
socket lock.
Note that this race can occur with other SCTP timeouts that take
the bh_lock_sock() in the event sctp_accept() is called.
BUG: soft lockup - CPU#9 stuck for 67s! [swapper:0]
...
RIP: 0010:[<ffffffff8152d48e>] [<ffffffff8152d48e>] _spin_lock+0x1e/0x30
RSP: 0018:ffff880028323b20 EFLAGS: 00000206
RAX: 0000000000000002 RBX: ffff880028323b20 RCX: 0000000000000000
RDX: 0000000000000000 RSI: ffff880028323be0 RDI: ffff8804632c4b48
RBP: ffffffff8100bb93 R08: 0000000000000000 R09: 0000000000000000
R10: ffff880610662280 R11: 0000000000000100 R12: ffff880028323aa0
R13: ffff8804383c3880 R14: ffff880028323a90 R15: ffffffff81534225
FS: 0000000000000000(0000) GS:ffff880028320000(0000) knlGS:0000000000000000
CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b
CR2: 00000000006df528 CR3: 0000000001a85000 CR4: 00000000000006e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
Process swapper (pid: 0, threadinfo ffff880616b70000, task ffff880616b6cab0)
Stack:
ffff880028323c40 ffffffffa01c2582 ffff880614cfb020 0000000000000000
<d> 0100000000000000 00000014383a6c44 ffff8804383c3880 ffff880614e93c00
<d> ffff880614e93c00 0000000000000000 ffff8804632c4b00 ffff8804383c38b8
Call Trace:
<IRQ>
[<ffffffffa01c2582>] ? sctp_rcv+0x492/0xa10 [sctp]
[<ffffffff8148c559>] ? nf_iterate+0x69/0xb0
[<ffffffff814974a0>] ? ip_local_deliver_finish+0x0/0x2d0
[<ffffffff8148c716>] ? nf_hook_slow+0x76/0x120
[<ffffffff814974a0>] ? ip_local_deliver_finish+0x0/0x2d0
[<ffffffff8149757d>] ? ip_local_deliver_finish+0xdd/0x2d0
[<ffffffff81497808>] ? ip_local_deliver+0x98/0xa0
[<ffffffff81496ccd>] ? ip_rcv_finish+0x12d/0x440
[<ffffffff81497255>] ? ip_rcv+0x275/0x350
[<ffffffff8145cfeb>] ? __netif_receive_skb+0x4ab/0x750
...
With lockdep debugging:
=====================================
[ BUG: bad unlock balance detected! ]
-------------------------------------
CslRx/12087 is trying to release lock (slock-AF_INET) at:
[<ffffffffa01bcae0>] sctp_generate_timeout_event+0x40/0xe0 [sctp]
but there are no more locks to release!
other info that might help us debug this:
2 locks held by CslRx/12087:
#0: (&asoc->timers[i]){+.-...}, at: [<ffffffff8108ce1f>] run_timer_softirq+0x16f/0x3e0
#1: (slock-AF_INET){+.-...}, at: [<ffffffffa01bcac3>] sctp_generate_timeout_event+0x23/0xe0 [sctp]
Ensure the socket taken is also the same one that is released by
saving a copy of the socket before entering the timeout event
critical section.
Signed-off-by: Karl Heiss <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-362 | static void sctp_generate_timeout_event(struct sctp_association *asoc,
sctp_event_timeout_t timeout_type)
{
struct sock *sk = asoc->base.sk;
struct net *net = sock_net(sk);
int error = 0;
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
pr_debug("%s: sock is busy: timer %d\n", __func__,
timeout_type);
/* Try again later. */
if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
sctp_association_hold(asoc);
goto out_unlock;
}
/* Is this association really dead and just waiting around for
* the timer to let go of the reference?
*/
if (asoc->base.dead)
goto out_unlock;
/* Run through the state machine. */
error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
SCTP_ST_TIMEOUT(timeout_type),
asoc->state, asoc->ep, asoc,
(void *)timeout_type, GFP_ATOMIC);
if (error)
sk->sk_err = -error;
out_unlock:
bh_unlock_sock(sk);
sctp_association_put(asoc);
}
| 167,503 |
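The entire fix is the local `struct sock *sk = asoc->base.sk;` read before bh_lock_sock(): because sctp_assoc_migrate() can repoint asoc->base.sk between lock and unlock, the critical section must lock and unlock through the same cached pointer. Reduced to pthreads, the pattern looks like this (illustrative names; build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct sock { pthread_mutex_t lock; };
struct assoc { struct sock *sk; };  /* sk can be repointed by migrate() */

static void timeout_event(struct assoc *a)
{
    /* The fix: read a->sk exactly once, before locking. If the field
     * were re-read at unlock time, a concurrent migrate() could have
     * swapped it, and we would unlock a different mutex than we locked,
     * deadlocking the next locker of the original one. */
    struct sock *sk = a->sk;
    pthread_mutex_lock(&sk->lock);
    /* ... run the state machine against the association ... */
    pthread_mutex_unlock(&sk->lock);  /* same object, guaranteed */
}

int main(void)
{
    struct sock s = { PTHREAD_MUTEX_INITIALIZER };
    struct assoc a = { &s };
    timeout_event(&a);
    puts("balanced lock/unlock");
    return 0;
}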
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
struct siginfo info;
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_TKILL;
info.si_pid = task_tgid_vnr(current);
info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
return do_send_specific(tgid, pid, sig, &info);
}
Commit Message: kernel/signal.c: stop info leak via the tkill and the tgkill syscalls
This fixes a kernel memory contents leak via the tkill and tgkill syscalls
for compat processes.
This is visible in the siginfo_t->_sifields._rt.si_sigval.sival_ptr field
when handling signals delivered from tkill.
The place of the infoleak:
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
...
put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
...
}
Signed-off-by: Emese Revfy <[email protected]>
Reviewed-by: PaX Team <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Serge Hallyn <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-399 | static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
struct siginfo info = {};
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_TKILL;
info.si_pid = task_tgid_vnr(current);
info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
return do_send_specific(tgid, pid, sig, &info);
}
| 166,082 |
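`struct siginfo info = {};` closes the leak because aggregate initialization zeroes every member the tkill path never assigns, so no stale kernel stack bytes survive into copy_siginfo_to_user32(). A distilled sketch of the bug and the fix (note that `= {}` with an empty brace list is a GNU C extension; standard C spells it `= {0}`, and only memset() is formally guaranteed to also clear padding bytes):

#include <stdio.h>

struct info {
    int signo;
    int errno_;
    int code;
    void *ptr;              /* never assigned on the tkill-style path */
};

static void copy_out(const struct info *i)  /* stands in for the copy
                                               to user space */
{
    printf("signo=%d ptr=%p\n", i->signo, i->ptr);
}

int main(void)
{
    /* BAD: partial assignment; the remaining bytes are whatever was on
     * the stack, and they would be copied out verbatim (the leak). */
    struct info leaky;
    leaky.signo = 6;
    (void)leaky;

    /* GOOD (the fix): aggregate-initialize so every member is zero.
     * memset(&clean, 0, sizeof(clean)) additionally guarantees that
     * padding bytes are cleared, which initializers formally do not. */
    struct info clean = {0};
    clean.signo = 6;
    copy_out(&clean);
    return 0;
}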
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::Reset() {
vaapi_wrapper_->DestroyPendingBuffers();
}
Commit Message: vaapi vda: Delete owned objects on worker thread in Cleanup()
This CL adds a SEQUENCE_CHECKER to Vaapi*Accelerator classes, and
posts the destruction of those objects to the appropriate thread on
Cleanup().
Also makes {H264,VP8,VP9}Picture RefCountedThreadSafe, see miu@
comment in
https://chromium-review.googlesource.com/c/chromium/src/+/794091#message-a64bed985cfaf8a19499a517bb110a7ce581dc0f
TEST=play back VP9/VP8/H264 w/ simplechrome on soraka, Release build
unstripped, let video play for a few seconds then navigate back; no
crashes. Unittests as before:
video_decode_accelerator_unittest --test_video_data=test-25fps.vp9:320:240:250:250:35:150:12
video_decode_accelerator_unittest --test_video_data=test-25fps.vp8:320:240:250:250:35:150:11
video_decode_accelerator_unittest --test_video_data=test-25fps.h264:320:240:250:258:35:150:1
Bug: 789160
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I7d96aaf89c92bf46f00c8b8b36798e057a842ed2
Reviewed-on: https://chromium-review.googlesource.com/794091
Reviewed-by: Pawel Osciak <[email protected]>
Commit-Queue: Miguel Casas <[email protected]>
Cr-Commit-Position: refs/heads/master@{#523372}
CWE ID: CWE-362 | void VaapiVideoDecodeAccelerator::VaapiH264Accelerator::Reset() {
DETACH_FROM_SEQUENCE(sequence_checker_);
vaapi_wrapper_->DestroyPendingBuffers();
}
| 172,807 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static bool check_underflow(const struct ip6t_entry *e)
{
const struct xt_entry_target *t;
unsigned int verdict;
if (!unconditional(&e->ipv6))
return false;
t = ip6t_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
return false;
verdict = ((struct xt_standard_target *)t)->verdict;
verdict = -verdict - 1;
return verdict == NF_DROP || verdict == NF_ACCEPT;
}
Commit Message: netfilter: x_tables: fix unconditional helper
Ben Hawkes says:
In the mark_source_chains function (net/ipv4/netfilter/ip_tables.c) it
is possible for a user-supplied ipt_entry structure to have a large
next_offset field. This field is not bounds checked prior to writing a
counter value at the supplied offset.
Problem is that mark_source_chains should not have been called --
the rule doesn't have a next entry, so its supposed to return
an absolute verdict of either ACCEPT or DROP.
However, the function conditional() doesn't work as the name implies.
It only checks that the rule is using wildcard address matching.
However, an unconditional rule must also not be using any matches
(no -m args).
The underflow validator only checked the addresses, therefore
passing the 'unconditional absolute verdict' test, while
mark_source_chains also tested for presence of matches, and thus
proceeeded to the next (not-existent) rule.
Unify this so that all the callers have same idea of 'unconditional rule'.
Reported-by: Ben Hawkes <[email protected]>
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
CWE ID: CWE-119 | static bool check_underflow(const struct ip6t_entry *e)
{
const struct xt_entry_target *t;
unsigned int verdict;
if (!unconditional(e))
return false;
t = ip6t_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
return false;
verdict = ((struct xt_standard_target *)t)->verdict;
verdict = -verdict - 1;
return verdict == NF_DROP || verdict == NF_ACCEPT;
}
| 167,373 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
{
int ret;
struct k90_led *led = container_of(led_cdev, struct k90_led, cdev);
struct device *dev = led->cdev.dev->parent;
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int brightness;
char data[8];
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 8,
USB_CTRL_SET_TIMEOUT);
if (ret < 0) {
dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
ret);
return -EIO;
}
brightness = data[4];
if (brightness < 0 || brightness > 3) {
dev_warn(dev,
"Read invalid backlight brightness: %02hhx.\n",
data[4]);
return -EIO;
}
return brightness;
}
Commit Message: HID: corsair: fix DMA buffers on stack
Not all platforms support DMA to the stack, and specifically since v4.9
this is no longer supported on x86 with VMAP_STACK either.
Note that the macro-mode buffer was larger than necessary.
Fixes: 6f78193ee9ea ("HID: corsair: Add Corsair Vengeance K90 driver")
Cc: stable <[email protected]>
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>
CWE ID: CWE-119 | static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
{
int ret;
struct k90_led *led = container_of(led_cdev, struct k90_led, cdev);
struct device *dev = led->cdev.dev->parent;
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int brightness;
char *data;
data = kmalloc(8, GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 8,
USB_CTRL_SET_TIMEOUT);
if (ret < 0) {
dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
ret);
ret = -EIO;
goto out;
}
brightness = data[4];
if (brightness < 0 || brightness > 3) {
dev_warn(dev,
"Read invalid backlight brightness: %02hhx.\n",
data[4]);
ret = -EIO;
goto out;
}
ret = brightness;
out:
kfree(data);
return ret;
}
| 168,393 |
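The k90 fix trades an 8-byte stack array for kmalloc() because usb_control_msg() may hand the buffer to DMA hardware, and stack memory, especially vmalloc-backed stacks under VMAP_STACK, is not DMA-safe. The shape of the change in standalone C (do_transfer stands in for the USB call; error handling funnels through one exit so the heap buffer is always freed):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for usb_control_msg(): the real one may hand `buf` to DMA
 * hardware, which cannot target stack memory on VMAP_STACK kernels. */
static int do_transfer(unsigned char *buf, size_t len)
{
    memset(buf, 0xab, len);         /* device "fills" the buffer */
    return (int)len;
}

static int read_status(void)
{
    /* BAD: unsigned char data[8]; do_transfer(data, 8); -- stack DMA */

    /* GOOD (the fix): give the transfer a heap buffer. */
    unsigned char *data = malloc(8);
    if (data == NULL)
        return -1;
    int ret = do_transfer(data, 8);
    int status = (ret == 8) ? data[4] : -1;
    free(data);                     /* single exit path frees it */
    return status;
}

int main(void)
{
    printf("status byte: %d\n", read_status());
    return 0;
}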
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: GPMF_ERR IsValidSize(GPMF_stream *ms, uint32_t size) // size is in longs not bytes.
{
if (ms)
{
int32_t nestsize = (int32_t)ms->nest_size[ms->nest_level];
if (nestsize == 0 && ms->nest_level == 0)
nestsize = ms->buffer_size_longs;
if (size + 2 <= nestsize) return GPMF_OK;
}
return GPMF_ERROR_BAD_STRUCTURE;
}
Commit Message: fixed many security issues with the too crude mp4 reader
CWE ID: CWE-787 | GPMF_ERR IsValidSize(GPMF_stream *ms, uint32_t size) // size is in longs not bytes.
{
if (ms)
{
uint32_t nestsize = (uint32_t)ms->nest_size[ms->nest_level];
if (nestsize == 0 && ms->nest_level == 0)
nestsize = ms->buffer_size_longs;
if (size + 2 <= nestsize) return GPMF_OK;
}
return GPMF_ERROR_BAD_STRUCTURE;
}
| 169,544 |
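Switching nestsize from int32_t to uint32_t keeps the bounds comparison unsigned end-to-end; mixed signed/unsigned comparisons silently convert the signed side to unsigned, a classic way for a bounds check to pass values it should reject, and the `size + 2` addition can still wrap on its own. Both hazards in a distilled demo (illustrative, not GPMF's API):

#include <stdint.h>
#include <stdio.h>

/* Pre-fix shape: signed nest size. Comparing int32_t with uint32_t
 * converts the signed side to unsigned, so a negative nestsize becomes
 * huge and the check passes when it should fail. */
static int valid_signed(uint32_t size, int32_t nestsize)
{
    return size + 2 <= nestsize;    /* nestsize promoted to unsigned */
}

/* Post-fix shape: unsigned end-to-end, but note size + 2 can wrap. */
static int valid_unsigned(uint32_t size, uint32_t nestsize)
{
    return size + 2 <= nestsize;
}

int main(void)
{
    printf("signed, nestsize=-1:  %d\n", valid_signed(100, -1));
    printf("unsigned, honest:     %d\n", valid_unsigned(100, 200));
    printf("unsigned, size wraps: %d\n",
           valid_unsigned(0xFFFFFFFEu, 10));  /* 1: size + 2 wrapped to 0 */
    return 0;
}

A fully robust check also rules out the wrap, e.g. `nestsize >= 2 && size <= nestsize - 2`.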