Dataset schema (column, type, observed range):
  instruction         stringclasses   1 value
  input               stringlengths   90 .. 139k
  output              stringlengths   16 .. 138k
  __index_level_0__   int64           165k .. 175k
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: XSharedMemoryId AttachSharedMemory(Display* display, int shared_memory_key) { DCHECK(QuerySharedMemorySupport(display)); XShmSegmentInfo shminfo; memset(&shminfo, 0, sizeof(shminfo)); shminfo.shmid = shared_memory_key; if (!XShmAttach(display, &shminfo)) NOTREACHED(); return shminfo.shmseg; } Commit Message: Make shared memory segments writable only by their rightful owners. BUG=143859 TEST=Chrome's UI still works on Linux and Chrome OS Review URL: https://chromiumcodereview.appspot.com/10854242 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@158289 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-264
XSharedMemoryId AttachSharedMemory(Display* display, int shared_memory_key) { DCHECK(QuerySharedMemorySupport(display)); XShmSegmentInfo shminfo; memset(&shminfo, 0, sizeof(shminfo)); shminfo.shmid = shared_memory_key; if (!XShmAttach(display, &shminfo)) { LOG(WARNING) << "X failed to attach to shared memory segment " << shminfo.shmid; NOTREACHED(); } else { VLOG(1) << "X attached to shared memory segment " << shminfo.shmid; } return shminfo.shmseg; }
171,593
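The function in this row only gains logging; the permission change the commit message describes ("make shared memory segments writable only by their rightful owners") lives in the segment-creation code, not in the attach helper shown. A minimal sketch, assuming POSIX System V shared memory, of creating a segment with owner-only permissions (0600) rather than world-writable ones; the constants and flow are illustrative, not Chromium's actual code:

```c
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

/*
 * Illustrative only: create the System V segment with owner-only
 * permissions (0600) instead of world-writable ones, which is the policy
 * the commit message above describes.
 */
int main(void)
{
    int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
    if (shmid == -1) {
        perror("shmget");
        return 1;
    }
    printf("created owner-only segment %d\n", shmid);
    shmctl(shmid, IPC_RMID, NULL);  /* mark for removal once detached */
    return 0;
}
```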
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void NaClProcessHost::OnProcessLaunched() { FilePath irt_path; const char* irt_path_var = getenv("NACL_IRT_LIBRARY"); if (irt_path_var != NULL) { FilePath::StringType string(irt_path_var, irt_path_var + strlen(irt_path_var)); irt_path = FilePath(string); } else { FilePath plugin_dir; if (!PathService::Get(chrome::DIR_INTERNAL_PLUGINS, &plugin_dir)) { LOG(ERROR) << "Failed to locate the plugins directory"; delete this; return; } irt_path = plugin_dir.Append(GetIrtLibraryFilename()); } base::FileUtilProxy::CreateOrOpenCallback* callback = callback_factory_.NewCallback(&NaClProcessHost::OpenIrtFileDone); if (!base::FileUtilProxy::CreateOrOpen( BrowserThread::GetMessageLoopProxyForThread(BrowserThread::FILE), irt_path, base::PLATFORM_FILE_OPEN | base::PLATFORM_FILE_READ, callback)) { delete callback; delete this; } } Commit Message: Fix a small leak in FileUtilProxy BUG=none TEST=green mem bots Review URL: http://codereview.chromium.org/7669046 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@97451 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
void NaClProcessHost::OnProcessLaunched() { FilePath irt_path; const char* irt_path_var = getenv("NACL_IRT_LIBRARY"); if (irt_path_var != NULL) { FilePath::StringType string(irt_path_var, irt_path_var + strlen(irt_path_var)); irt_path = FilePath(string); } else { FilePath plugin_dir; if (!PathService::Get(chrome::DIR_INTERNAL_PLUGINS, &plugin_dir)) { LOG(ERROR) << "Failed to locate the plugins directory"; delete this; return; } irt_path = plugin_dir.Append(GetIrtLibraryFilename()); } base::FileUtilProxy::CreateOrOpenCallback* callback = callback_factory_.NewCallback(&NaClProcessHost::OpenIrtFileDone); if (!base::FileUtilProxy::CreateOrOpen( BrowserThread::GetMessageLoopProxyForThread(BrowserThread::FILE), irt_path, base::PLATFORM_FILE_OPEN | base::PLATFORM_FILE_READ, callback)) { delete this; } }
170,275
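A plausible reading of the fix above is an ownership-transfer rule: once the scheduling call owns the callback, it frees it on its own failure path, so the caller must not free it again. A minimal C sketch of that pattern under that assumption; `struct callback` and `schedule_open` are hypothetical names, not Chromium's API:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical callback object; the names are illustrative, not Chromium's. */
struct callback {
    void (*run)(void);
};

/*
 * The callee takes ownership of 'cb'.  On its own failure path it frees the
 * callback itself, so the caller must not free it again -- which is why the
 * fixed caller above no longer contains "delete callback".
 */
static int schedule_open(struct callback *cb, int simulate_failure)
{
    if (simulate_failure) {
        free(cb);               /* callee cleans up what it now owns */
        return -1;
    }
    /* ... hand cb to a worker, which frees it after running it ... */
    free(cb);
    return 0;
}

int main(void)
{
    struct callback *cb = calloc(1, sizeof(*cb));
    if (cb == NULL)
        return 1;
    if (schedule_open(cb, 1) != 0) {
        /* cb was already freed by schedule_open(); freeing it here would
         * be the double-free counterpart of the original leak. */
        fprintf(stderr, "scheduling failed\n");
    }
    return 0;
}
```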
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: zlib_run(struct zlib *zlib) /* Like zlib_advance but also handles a stream of IDAT chunks. */ { /* The 'extra_bytes' field is set by zlib_advance if there is extra * compressed data in the chunk it handles (if it sees Z_STREAM_END before * all the input data has been used.) This function uses the value to update * the correct chunk length, so the problem should only ever be detected once * for each chunk. zlib_advance outputs the error message, though see the * IDAT specific check below. */ zlib->extra_bytes = 0; if (zlib->idat != NULL) { struct IDAT_list *list = zlib->idat->idat_list_head; struct IDAT_list *last = zlib->idat->idat_list_tail; int skip = 0; /* 'rewrite_offset' is the offset of the LZ data within the chunk, for * IDAT it should be 0: */ assert(zlib->rewrite_offset == 0); /* Process each IDAT_list in turn; the caller has left the stream * positioned at the start of the first IDAT chunk data. */ for (;;) { const unsigned int count = list->count; unsigned int i; for (i = 0; i<count; ++i) { int rc; if (skip > 0) /* Skip CRC and next IDAT header */ skip_12(zlib->file); skip = 12; /* for the next time */ rc = zlib_advance(zlib, list->lengths[i]); switch (rc) { case ZLIB_OK: /* keep going */ break; case ZLIB_STREAM_END: /* stop */ /* There may be extra chunks; if there are and one of them is * not zero length output the 'extra data' message. Only do * this check if errors are being output. */ if (zlib->global->errors && zlib->extra_bytes == 0) { struct IDAT_list *check = list; int j = i+1, jcount = count; for (;;) { for (; j<jcount; ++j) if (check->lengths[j] > 0) { chunk_message(zlib->chunk, "extra compressed data"); goto end_check; } if (check == last) break; check = check->next; jcount = check->count; j = 0; } } end_check: /* Terminate the list at the current position, reducing the * length of the last IDAT too if required. */ list->lengths[i] -= zlib->extra_bytes; list->count = i+1; zlib->idat->idat_list_tail = list; /* FALL THROUGH */ default: return rc; } } /* At the end of the compressed data and Z_STREAM_END was not seen. */ if (list == last) return ZLIB_OK; list = list->next; } } else { struct chunk *chunk = zlib->chunk; int rc; assert(zlib->rewrite_offset < chunk->chunk_length); rc = zlib_advance(zlib, chunk->chunk_length - zlib->rewrite_offset); /* The extra bytes in the chunk are handled now by adjusting the chunk * length to exclude them; the zlib data is always stored at the end of * the PNG chunk (although clearly this is not necessary.) zlib_advance * has already output a warning message. */ chunk->chunk_length -= zlib->extra_bytes; return rc; } } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
zlib_run(struct zlib *zlib) /* Like zlib_advance but also handles a stream of IDAT chunks. */ { /* The 'extra_bytes' field is set by zlib_advance if there is extra * compressed data in the chunk it handles (if it sees Z_STREAM_END before * all the input data has been used.) This function uses the value to update * the correct chunk length, so the problem should only ever be detected once * for each chunk. zlib_advance outputs the error message, though see the * IDAT specific check below. */ zlib->extra_bytes = 0; if (zlib->idat != NULL) { struct IDAT_list *list = zlib->idat->idat_list_head; struct IDAT_list *last = zlib->idat->idat_list_tail; int skip = 0; /* 'rewrite_offset' is the offset of the LZ data within the chunk, for * IDAT it should be 0: */ assert(zlib->rewrite_offset == 0); /* Process each IDAT_list in turn; the caller has left the stream * positioned at the start of the first IDAT chunk data. */ for (;;) { const unsigned int count = list->count; unsigned int i; for (i = 0; i<count; ++i) { int rc; if (skip > 0) /* Skip CRC and next IDAT header */ skip_12(zlib->file); skip = 12; /* for the next time */ rc = zlib_advance(zlib, list->lengths[i]); switch (rc) { case ZLIB_OK: /* keep going */ break; case ZLIB_STREAM_END: /* stop */ /* There may be extra chunks; if there are and one of them is * not zero length output the 'extra data' message. Only do * this check if errors are being output. */ if (zlib->global->errors && zlib->extra_bytes == 0) { struct IDAT_list *check = list; int j = i+1, jcount = count; for (;;) { for (; j<jcount; ++j) if (check->lengths[j] > 0) { chunk_message(zlib->chunk, "extra compressed data"); goto end_check; } if (check == last) break; check = check->next; jcount = check->count; j = 0; } } end_check: /* Terminate the list at the current position, reducing the * length of the last IDAT too if required. */ list->lengths[i] -= zlib->extra_bytes; list->count = i+1; zlib->idat->idat_list_tail = list; /* FALL THROUGH */ default: return rc; } } /* At the end of the compressed data and Z_STREAM_END was not seen. */ if (list == last) return ZLIB_OK; list = list->next; } } else { struct chunk *chunk = zlib->chunk; int rc; assert(zlib->rewrite_offset < chunk->chunk_length); rc = zlib_advance(zlib, chunk->chunk_length - zlib->rewrite_offset); /* The extra bytes in the chunk are handled now by adjusting the chunk * length to exclude them; the zlib data is always stored at the end of * the PNG chunk (although clearly this is not necessary.) zlib_advance * has already output a warning message. */ chunk->chunk_length -= zlib->extra_bytes; return rc; } }
173,743
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: LayoutUnit RenderBox::availableLogicalHeightUsing(const Length& h) const { if (h.isFixed()) return computeContentBoxLogicalHeight(h.value()); if (isRenderView()) return isHorizontalWritingMode() ? toRenderView(this)->frameView()->visibleHeight() : toRenderView(this)->frameView()->visibleWidth(); if (isTableCell() && (h.isAuto() || h.isPercent())) return overrideHeight() - borderAndPaddingLogicalWidth(); if (h.isPercent()) return computeContentBoxLogicalHeight(h.calcValue(containingBlock()->availableLogicalHeight())); if (isRenderBlock() && isPositioned() && style()->height().isAuto() && !(style()->top().isAuto() || style()->bottom().isAuto())) { RenderBlock* block = const_cast<RenderBlock*>(toRenderBlock(this)); LayoutUnit oldHeight = block->logicalHeight(); block->computeLogicalHeight(); LayoutUnit newHeight = block->computeContentBoxLogicalHeight(block->contentLogicalHeight()); block->setLogicalHeight(oldHeight); return computeContentBoxLogicalHeight(newHeight); } return containingBlock()->availableLogicalHeight(); } Commit Message: Source/WebCore: Fix for bug 64046 - Wrong image height in absolutely positioned div in relatively positioned parent with bottom padding. https://bugs.webkit.org/show_bug.cgi?id=64046 Patch by Kulanthaivel Palanichamy <[email protected]> on 2011-07-21 Reviewed by David Hyatt. Test: fast/css/absolute-child-with-percent-height-inside-relative-parent.html * rendering/RenderBox.cpp: (WebCore::RenderBox::availableLogicalHeightUsing): LayoutTests: Test to cover absolutely positioned child with percentage height in relatively positioned parent with bottom padding. https://bugs.webkit.org/show_bug.cgi?id=64046 Patch by Kulanthaivel Palanichamy <[email protected]> on 2011-07-21 Reviewed by David Hyatt. * fast/css/absolute-child-with-percent-height-inside-relative-parent-expected.txt: Added. * fast/css/absolute-child-with-percent-height-inside-relative-parent.html: Added. git-svn-id: svn://svn.chromium.org/blink/trunk@91533 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-20
LayoutUnit RenderBox::availableLogicalHeightUsing(const Length& h) const { if (h.isFixed()) return computeContentBoxLogicalHeight(h.value()); if (isRenderView()) return isHorizontalWritingMode() ? toRenderView(this)->frameView()->visibleHeight() : toRenderView(this)->frameView()->visibleWidth(); if (isTableCell() && (h.isAuto() || h.isPercent())) return overrideHeight() - borderAndPaddingLogicalWidth(); if (h.isPercent()) { LayoutUnit availableHeight; // https://bugs.webkit.org/show_bug.cgi?id=64046 // For absolutely positioned elements whose containing block is based on a block-level element, // the percentage is calculated with respect to the height of the padding box of that element if (isPositioned()) availableHeight = containingBlockLogicalHeightForPositioned(containingBlock()); else availableHeight = containingBlock()->availableLogicalHeight(); return computeContentBoxLogicalHeight(h.calcValue(availableHeight)); } if (isRenderBlock() && isPositioned() && style()->height().isAuto() && !(style()->top().isAuto() || style()->bottom().isAuto())) { RenderBlock* block = const_cast<RenderBlock*>(toRenderBlock(this)); LayoutUnit oldHeight = block->logicalHeight(); block->computeLogicalHeight(); LayoutUnit newHeight = block->computeContentBoxLogicalHeight(block->contentLogicalHeight()); block->setLogicalHeight(oldHeight); return computeContentBoxLogicalHeight(newHeight); } return containingBlock()->availableLogicalHeight(); }
170,617
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: spnego_gss_import_sec_context( OM_uint32 *minor_status, const gss_buffer_t interprocess_token, gss_ctx_id_t *context_handle) { OM_uint32 ret; ret = gss_import_sec_context(minor_status, interprocess_token, context_handle); return (ret); } Commit Message: Fix SPNEGO context aliasing bugs [CVE-2015-2695] The SPNEGO mechanism currently replaces its context handle with the mechanism context handle upon establishment, under the assumption that most GSS functions are only called after context establishment. This assumption is incorrect, and can lead to aliasing violations for some programs. Maintain the SPNEGO context structure after context establishment and refer to it in all GSS methods. Add initiate and opened flags to the SPNEGO context structure for use in gss_inquire_context() prior to context establishment. CVE-2015-2695: In MIT krb5 1.5 and later, applications which call gss_inquire_context() on a partially-established SPNEGO context can cause the GSS-API library to read from a pointer using the wrong type, generally causing a process crash. This bug may go unnoticed, because the most common SPNEGO authentication scenario establishes the context after just one call to gss_accept_sec_context(). Java server applications using the native JGSS provider are vulnerable to this bug. A carefully crafted SPNEGO packet might allow the gss_inquire_context() call to succeed with attacker-determined results, but applications should not make access control decisions based on gss_inquire_context() results prior to context establishment. CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C [[email protected]: several bugfixes, style changes, and edge-case behavior changes; commit message and CVE description] ticket: 8244 target_version: 1.14 tags: pullup CWE ID: CWE-18
spnego_gss_import_sec_context( OM_uint32 *minor_status, const gss_buffer_t interprocess_token, gss_ctx_id_t *context_handle) { /* * Until we implement partial context exports, there are no SPNEGO * exported context tokens, only tokens for underlying mechs. So just * return an error for now. */ return GSS_S_UNAVAILABLE; }
166,659
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: jas_image_t *jp2_decode(jas_stream_t *in, char *optstr) { jp2_box_t *box; int found; jas_image_t *image; jp2_dec_t *dec; bool samedtype; int dtype; unsigned int i; jp2_cmap_t *cmapd; jp2_pclr_t *pclrd; jp2_cdef_t *cdefd; unsigned int channo; int newcmptno; int_fast32_t *lutents; #if 0 jp2_cdefchan_t *cdefent; int cmptno; #endif jp2_cmapent_t *cmapent; jas_icchdr_t icchdr; jas_iccprof_t *iccprof; dec = 0; box = 0; image = 0; if (!(dec = jp2_dec_create())) { goto error; } /* Get the first box. This should be a JP box. */ if (!(box = jp2_box_get(in))) { jas_eprintf("error: cannot get box\n"); goto error; } if (box->type != JP2_BOX_JP) { jas_eprintf("error: expecting signature box\n"); goto error; } if (box->data.jp.magic != JP2_JP_MAGIC) { jas_eprintf("incorrect magic number\n"); goto error; } jp2_box_destroy(box); box = 0; /* Get the second box. This should be a FTYP box. */ if (!(box = jp2_box_get(in))) { goto error; } if (box->type != JP2_BOX_FTYP) { jas_eprintf("expecting file type box\n"); goto error; } jp2_box_destroy(box); box = 0; /* Get more boxes... */ found = 0; while ((box = jp2_box_get(in))) { if (jas_getdbglevel() >= 1) { jas_eprintf("box type %s\n", box->info->name); } switch (box->type) { case JP2_BOX_JP2C: found = 1; break; case JP2_BOX_IHDR: if (!dec->ihdr) { dec->ihdr = box; box = 0; } break; case JP2_BOX_BPCC: if (!dec->bpcc) { dec->bpcc = box; box = 0; } break; case JP2_BOX_CDEF: if (!dec->cdef) { dec->cdef = box; box = 0; } break; case JP2_BOX_PCLR: if (!dec->pclr) { dec->pclr = box; box = 0; } break; case JP2_BOX_CMAP: if (!dec->cmap) { dec->cmap = box; box = 0; } break; case JP2_BOX_COLR: if (!dec->colr) { dec->colr = box; box = 0; } break; } if (box) { jp2_box_destroy(box); box = 0; } if (found) { break; } } if (!found) { jas_eprintf("error: no code stream found\n"); goto error; } if (!(dec->image = jpc_decode(in, optstr))) { jas_eprintf("error: cannot decode code stream\n"); goto error; } /* An IHDR box must be present. */ if (!dec->ihdr) { jas_eprintf("error: missing IHDR box\n"); goto error; } /* Does the number of components indicated in the IHDR box match the value specified in the code stream? */ if (dec->ihdr->data.ihdr.numcmpts != JAS_CAST(uint, jas_image_numcmpts(dec->image))) { jas_eprintf("warning: number of components mismatch\n"); } /* At least one component must be present. */ if (!jas_image_numcmpts(dec->image)) { jas_eprintf("error: no components\n"); goto error; } /* Determine if all components have the same data type. */ samedtype = true; dtype = jas_image_cmptdtype(dec->image, 0); for (i = 1; i < JAS_CAST(uint, jas_image_numcmpts(dec->image)); ++i) { if (jas_image_cmptdtype(dec->image, i) != dtype) { samedtype = false; break; } } /* Is the component data type indicated in the IHDR box consistent with the data in the code stream? */ if ((samedtype && dec->ihdr->data.ihdr.bpc != JP2_DTYPETOBPC(dtype)) || (!samedtype && dec->ihdr->data.ihdr.bpc != JP2_IHDR_BPCNULL)) { jas_eprintf("warning: component data type mismatch\n"); } /* Is the compression type supported? */ if (dec->ihdr->data.ihdr.comptype != JP2_IHDR_COMPTYPE) { jas_eprintf("error: unsupported compression type\n"); goto error; } if (dec->bpcc) { /* Is the number of components indicated in the BPCC box consistent with the code stream data? 
*/ if (dec->bpcc->data.bpcc.numcmpts != JAS_CAST(uint, jas_image_numcmpts( dec->image))) { jas_eprintf("warning: number of components mismatch\n"); } /* Is the component data type information indicated in the BPCC box consistent with the code stream data? */ if (!samedtype) { for (i = 0; i < JAS_CAST(uint, jas_image_numcmpts(dec->image)); ++i) { if (jas_image_cmptdtype(dec->image, i) != JP2_BPCTODTYPE(dec->bpcc->data.bpcc.bpcs[i])) { jas_eprintf("warning: component data type mismatch\n"); } } } else { jas_eprintf("warning: superfluous BPCC box\n"); } } /* A COLR box must be present. */ if (!dec->colr) { jas_eprintf("error: no COLR box\n"); goto error; } switch (dec->colr->data.colr.method) { case JP2_COLR_ENUM: jas_image_setclrspc(dec->image, jp2_getcs(&dec->colr->data.colr)); break; case JP2_COLR_ICC: iccprof = jas_iccprof_createfrombuf(dec->colr->data.colr.iccp, dec->colr->data.colr.iccplen); if (!iccprof) { jas_eprintf("error: failed to parse ICC profile\n"); goto error; } jas_iccprof_gethdr(iccprof, &icchdr); jas_eprintf("ICC Profile CS %08x\n", icchdr.colorspc); jas_image_setclrspc(dec->image, fromiccpcs(icchdr.colorspc)); dec->image->cmprof_ = jas_cmprof_createfromiccprof(iccprof); assert(dec->image->cmprof_); jas_iccprof_destroy(iccprof); break; } /* If a CMAP box is present, a PCLR box must also be present. */ if (dec->cmap && !dec->pclr) { jas_eprintf("warning: missing PCLR box or superfluous CMAP box\n"); jp2_box_destroy(dec->cmap); dec->cmap = 0; } /* If a CMAP box is not present, a PCLR box must not be present. */ if (!dec->cmap && dec->pclr) { jas_eprintf("warning: missing CMAP box or superfluous PCLR box\n"); jp2_box_destroy(dec->pclr); dec->pclr = 0; } /* Determine the number of channels (which is essentially the number of components after any palette mappings have been applied). */ dec->numchans = dec->cmap ? dec->cmap->data.cmap.numchans : JAS_CAST(uint, jas_image_numcmpts(dec->image)); /* Perform a basic sanity check on the CMAP box if present. */ if (dec->cmap) { for (i = 0; i < dec->numchans; ++i) { /* Is the component number reasonable? */ if (dec->cmap->data.cmap.ents[i].cmptno >= JAS_CAST(uint, jas_image_numcmpts(dec->image))) { jas_eprintf("error: invalid component number in CMAP box\n"); goto error; } /* Is the LUT index reasonable? */ if (dec->cmap->data.cmap.ents[i].pcol >= dec->pclr->data.pclr.numchans) { jas_eprintf("error: invalid CMAP LUT index\n"); goto error; } } } /* Allocate space for the channel-number to component-number LUT. 
*/ if (!(dec->chantocmptlut = jas_alloc2(dec->numchans, sizeof(uint_fast16_t)))) { jas_eprintf("error: no memory\n"); goto error; } if (!dec->cmap) { for (i = 0; i < dec->numchans; ++i) { dec->chantocmptlut[i] = i; } } else { cmapd = &dec->cmap->data.cmap; pclrd = &dec->pclr->data.pclr; cdefd = &dec->cdef->data.cdef; for (channo = 0; channo < cmapd->numchans; ++channo) { cmapent = &cmapd->ents[channo]; if (cmapent->map == JP2_CMAP_DIRECT) { dec->chantocmptlut[channo] = channo; } else if (cmapent->map == JP2_CMAP_PALETTE) { lutents = jas_alloc2(pclrd->numlutents, sizeof(int_fast32_t)); for (i = 0; i < pclrd->numlutents; ++i) { lutents[i] = pclrd->lutdata[cmapent->pcol + i * pclrd->numchans]; } newcmptno = jas_image_numcmpts(dec->image); jas_image_depalettize(dec->image, cmapent->cmptno, pclrd->numlutents, lutents, JP2_BPCTODTYPE(pclrd->bpc[cmapent->pcol]), newcmptno); dec->chantocmptlut[channo] = newcmptno; jas_free(lutents); #if 0 if (dec->cdef) { cdefent = jp2_cdef_lookup(cdefd, channo); if (!cdefent) { abort(); } jas_image_setcmpttype(dec->image, newcmptno, jp2_getct(jas_image_clrspc(dec->image), cdefent->type, cdefent->assoc)); } else { jas_image_setcmpttype(dec->image, newcmptno, jp2_getct(jas_image_clrspc(dec->image), 0, channo + 1)); } #endif } } } /* Mark all components as being of unknown type. */ for (i = 0; i < JAS_CAST(uint, jas_image_numcmpts(dec->image)); ++i) { jas_image_setcmpttype(dec->image, i, JAS_IMAGE_CT_UNKNOWN); } /* Determine the type of each component. */ if (dec->cdef) { for (i = 0; i < dec->numchans; ++i) { /* Is the channel number reasonable? */ if (dec->cdef->data.cdef.ents[i].channo >= dec->numchans) { jas_eprintf("error: invalid channel number in CDEF box\n"); goto error; } jas_image_setcmpttype(dec->image, dec->chantocmptlut[dec->cdef->data.cdef.ents[i].channo], jp2_getct(jas_image_clrspc(dec->image), dec->cdef->data.cdef.ents[i].type, dec->cdef->data.cdef.ents[i].assoc)); } } else { for (i = 0; i < dec->numchans; ++i) { jas_image_setcmpttype(dec->image, dec->chantocmptlut[i], jp2_getct(jas_image_clrspc(dec->image), 0, i + 1)); } } /* Delete any components that are not of interest. */ for (i = jas_image_numcmpts(dec->image); i > 0; --i) { if (jas_image_cmpttype(dec->image, i - 1) == JAS_IMAGE_CT_UNKNOWN) { jas_image_delcmpt(dec->image, i - 1); } } /* Ensure that some components survived. */ if (!jas_image_numcmpts(dec->image)) { jas_eprintf("error: no components\n"); goto error; } #if 0 jas_eprintf("no of components is %d\n", jas_image_numcmpts(dec->image)); #endif /* Prevent the image from being destroyed later. */ image = dec->image; dec->image = 0; jp2_dec_destroy(dec); return image; error: if (box) { jp2_box_destroy(box); } if (dec) { jp2_dec_destroy(dec); } return 0; } Commit Message: Fixed a bug that resulted in the destruction of JP2 box data that had never been constructed in the first place. CWE ID: CWE-476
jas_image_t *jp2_decode(jas_stream_t *in, char *optstr) { jp2_box_t *box; int found; jas_image_t *image; jp2_dec_t *dec; bool samedtype; int dtype; unsigned int i; jp2_cmap_t *cmapd; jp2_pclr_t *pclrd; jp2_cdef_t *cdefd; unsigned int channo; int newcmptno; int_fast32_t *lutents; #if 0 jp2_cdefchan_t *cdefent; int cmptno; #endif jp2_cmapent_t *cmapent; jas_icchdr_t icchdr; jas_iccprof_t *iccprof; dec = 0; box = 0; image = 0; if (!(dec = jp2_dec_create())) { goto error; } /* Get the first box. This should be a JP box. */ if (!(box = jp2_box_get(in))) { jas_eprintf("error: cannot get box\n"); goto error; } if (box->type != JP2_BOX_JP) { jas_eprintf("error: expecting signature box\n"); goto error; } if (box->data.jp.magic != JP2_JP_MAGIC) { jas_eprintf("incorrect magic number\n"); goto error; } jp2_box_destroy(box); box = 0; /* Get the second box. This should be a FTYP box. */ if (!(box = jp2_box_get(in))) { goto error; } if (box->type != JP2_BOX_FTYP) { jas_eprintf("expecting file type box\n"); goto error; } jp2_box_destroy(box); box = 0; /* Get more boxes... */ found = 0; while ((box = jp2_box_get(in))) { if (jas_getdbglevel() >= 1) { jas_eprintf("got box type %s\n", box->info->name); } switch (box->type) { case JP2_BOX_JP2C: found = 1; break; case JP2_BOX_IHDR: if (!dec->ihdr) { dec->ihdr = box; box = 0; } break; case JP2_BOX_BPCC: if (!dec->bpcc) { dec->bpcc = box; box = 0; } break; case JP2_BOX_CDEF: if (!dec->cdef) { dec->cdef = box; box = 0; } break; case JP2_BOX_PCLR: if (!dec->pclr) { dec->pclr = box; box = 0; } break; case JP2_BOX_CMAP: if (!dec->cmap) { dec->cmap = box; box = 0; } break; case JP2_BOX_COLR: if (!dec->colr) { dec->colr = box; box = 0; } break; } if (box) { jp2_box_destroy(box); box = 0; } if (found) { break; } } if (!found) { jas_eprintf("error: no code stream found\n"); goto error; } if (!(dec->image = jpc_decode(in, optstr))) { jas_eprintf("error: cannot decode code stream\n"); goto error; } /* An IHDR box must be present. */ if (!dec->ihdr) { jas_eprintf("error: missing IHDR box\n"); goto error; } /* Does the number of components indicated in the IHDR box match the value specified in the code stream? */ if (dec->ihdr->data.ihdr.numcmpts != JAS_CAST(uint, jas_image_numcmpts(dec->image))) { jas_eprintf("warning: number of components mismatch\n"); } /* At least one component must be present. */ if (!jas_image_numcmpts(dec->image)) { jas_eprintf("error: no components\n"); goto error; } /* Determine if all components have the same data type. */ samedtype = true; dtype = jas_image_cmptdtype(dec->image, 0); for (i = 1; i < JAS_CAST(uint, jas_image_numcmpts(dec->image)); ++i) { if (jas_image_cmptdtype(dec->image, i) != dtype) { samedtype = false; break; } } /* Is the component data type indicated in the IHDR box consistent with the data in the code stream? */ if ((samedtype && dec->ihdr->data.ihdr.bpc != JP2_DTYPETOBPC(dtype)) || (!samedtype && dec->ihdr->data.ihdr.bpc != JP2_IHDR_BPCNULL)) { jas_eprintf("warning: component data type mismatch\n"); } /* Is the compression type supported? */ if (dec->ihdr->data.ihdr.comptype != JP2_IHDR_COMPTYPE) { jas_eprintf("error: unsupported compression type\n"); goto error; } if (dec->bpcc) { /* Is the number of components indicated in the BPCC box consistent with the code stream data? 
*/ if (dec->bpcc->data.bpcc.numcmpts != JAS_CAST(uint, jas_image_numcmpts( dec->image))) { jas_eprintf("warning: number of components mismatch\n"); } /* Is the component data type information indicated in the BPCC box consistent with the code stream data? */ if (!samedtype) { for (i = 0; i < JAS_CAST(uint, jas_image_numcmpts(dec->image)); ++i) { if (jas_image_cmptdtype(dec->image, i) != JP2_BPCTODTYPE(dec->bpcc->data.bpcc.bpcs[i])) { jas_eprintf("warning: component data type mismatch\n"); } } } else { jas_eprintf("warning: superfluous BPCC box\n"); } } /* A COLR box must be present. */ if (!dec->colr) { jas_eprintf("error: no COLR box\n"); goto error; } switch (dec->colr->data.colr.method) { case JP2_COLR_ENUM: jas_image_setclrspc(dec->image, jp2_getcs(&dec->colr->data.colr)); break; case JP2_COLR_ICC: iccprof = jas_iccprof_createfrombuf(dec->colr->data.colr.iccp, dec->colr->data.colr.iccplen); if (!iccprof) { jas_eprintf("error: failed to parse ICC profile\n"); goto error; } jas_iccprof_gethdr(iccprof, &icchdr); jas_eprintf("ICC Profile CS %08x\n", icchdr.colorspc); jas_image_setclrspc(dec->image, fromiccpcs(icchdr.colorspc)); dec->image->cmprof_ = jas_cmprof_createfromiccprof(iccprof); assert(dec->image->cmprof_); jas_iccprof_destroy(iccprof); break; } /* If a CMAP box is present, a PCLR box must also be present. */ if (dec->cmap && !dec->pclr) { jas_eprintf("warning: missing PCLR box or superfluous CMAP box\n"); jp2_box_destroy(dec->cmap); dec->cmap = 0; } /* If a CMAP box is not present, a PCLR box must not be present. */ if (!dec->cmap && dec->pclr) { jas_eprintf("warning: missing CMAP box or superfluous PCLR box\n"); jp2_box_destroy(dec->pclr); dec->pclr = 0; } /* Determine the number of channels (which is essentially the number of components after any palette mappings have been applied). */ dec->numchans = dec->cmap ? dec->cmap->data.cmap.numchans : JAS_CAST(uint, jas_image_numcmpts(dec->image)); /* Perform a basic sanity check on the CMAP box if present. */ if (dec->cmap) { for (i = 0; i < dec->numchans; ++i) { /* Is the component number reasonable? */ if (dec->cmap->data.cmap.ents[i].cmptno >= JAS_CAST(uint, jas_image_numcmpts(dec->image))) { jas_eprintf("error: invalid component number in CMAP box\n"); goto error; } /* Is the LUT index reasonable? */ if (dec->cmap->data.cmap.ents[i].pcol >= dec->pclr->data.pclr.numchans) { jas_eprintf("error: invalid CMAP LUT index\n"); goto error; } } } /* Allocate space for the channel-number to component-number LUT. 
*/ if (!(dec->chantocmptlut = jas_alloc2(dec->numchans, sizeof(uint_fast16_t)))) { jas_eprintf("error: no memory\n"); goto error; } if (!dec->cmap) { for (i = 0; i < dec->numchans; ++i) { dec->chantocmptlut[i] = i; } } else { cmapd = &dec->cmap->data.cmap; pclrd = &dec->pclr->data.pclr; cdefd = &dec->cdef->data.cdef; for (channo = 0; channo < cmapd->numchans; ++channo) { cmapent = &cmapd->ents[channo]; if (cmapent->map == JP2_CMAP_DIRECT) { dec->chantocmptlut[channo] = channo; } else if (cmapent->map == JP2_CMAP_PALETTE) { lutents = jas_alloc2(pclrd->numlutents, sizeof(int_fast32_t)); for (i = 0; i < pclrd->numlutents; ++i) { lutents[i] = pclrd->lutdata[cmapent->pcol + i * pclrd->numchans]; } newcmptno = jas_image_numcmpts(dec->image); jas_image_depalettize(dec->image, cmapent->cmptno, pclrd->numlutents, lutents, JP2_BPCTODTYPE(pclrd->bpc[cmapent->pcol]), newcmptno); dec->chantocmptlut[channo] = newcmptno; jas_free(lutents); #if 0 if (dec->cdef) { cdefent = jp2_cdef_lookup(cdefd, channo); if (!cdefent) { abort(); } jas_image_setcmpttype(dec->image, newcmptno, jp2_getct(jas_image_clrspc(dec->image), cdefent->type, cdefent->assoc)); } else { jas_image_setcmpttype(dec->image, newcmptno, jp2_getct(jas_image_clrspc(dec->image), 0, channo + 1)); } #endif } } } /* Mark all components as being of unknown type. */ for (i = 0; i < JAS_CAST(uint, jas_image_numcmpts(dec->image)); ++i) { jas_image_setcmpttype(dec->image, i, JAS_IMAGE_CT_UNKNOWN); } /* Determine the type of each component. */ if (dec->cdef) { for (i = 0; i < dec->numchans; ++i) { /* Is the channel number reasonable? */ if (dec->cdef->data.cdef.ents[i].channo >= dec->numchans) { jas_eprintf("error: invalid channel number in CDEF box\n"); goto error; } jas_image_setcmpttype(dec->image, dec->chantocmptlut[dec->cdef->data.cdef.ents[i].channo], jp2_getct(jas_image_clrspc(dec->image), dec->cdef->data.cdef.ents[i].type, dec->cdef->data.cdef.ents[i].assoc)); } } else { for (i = 0; i < dec->numchans; ++i) { jas_image_setcmpttype(dec->image, dec->chantocmptlut[i], jp2_getct(jas_image_clrspc(dec->image), 0, i + 1)); } } /* Delete any components that are not of interest. */ for (i = jas_image_numcmpts(dec->image); i > 0; --i) { if (jas_image_cmpttype(dec->image, i - 1) == JAS_IMAGE_CT_UNKNOWN) { jas_image_delcmpt(dec->image, i - 1); } } /* Ensure that some components survived. */ if (!jas_image_numcmpts(dec->image)) { jas_eprintf("error: no components\n"); goto error; } #if 0 jas_eprintf("no of components is %d\n", jas_image_numcmpts(dec->image)); #endif /* Prevent the image from being destroyed later. */ image = dec->image; dec->image = 0; jp2_dec_destroy(dec); return image; error: if (box) { jp2_box_destroy(box); } if (dec) { jp2_dec_destroy(dec); } return 0; }
168,754
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: read_gif(Gif_Reader *grr, int read_flags, const char* landmark, Gif_ReadErrorHandler handler) { Gif_Stream *gfs; Gif_Image *gfi; Gif_Context gfc; int unknown_block_type = 0; if (gifgetc(grr) != 'G' || gifgetc(grr) != 'I' || gifgetc(grr) != 'F') return 0; (void)gifgetc(grr); (void)gifgetc(grr); (void)gifgetc(grr); gfs = Gif_NewStream(); gfi = Gif_NewImage(); gfc.stream = gfs; gfc.prefix = Gif_NewArray(Gif_Code, GIF_MAX_CODE); gfc.suffix = Gif_NewArray(uint8_t, GIF_MAX_CODE); gfc.length = Gif_NewArray(uint16_t, GIF_MAX_CODE); gfc.handler = handler; gfc.gfi = gfi; gfc.errors[0] = gfc.errors[1] = 0; if (!gfs || !gfi || !gfc.prefix || !gfc.suffix || !gfc.length) goto done; gfs->landmark = landmark; GIF_DEBUG(("\nGIF ")); if (!read_logical_screen_descriptor(gfs, grr)) goto done; GIF_DEBUG(("logscrdesc ")); while (!gifeof(grr)) { uint8_t block = gifgetbyte(grr); switch (block) { case ',': /* image block */ GIF_DEBUG(("imageread %d ", gfs->nimages)); gfi->identifier = last_name; last_name = 0; if (!Gif_AddImage(gfs, gfi)) goto done; else if (!read_image(grr, &gfc, gfi, read_flags)) { Gif_RemoveImage(gfs, gfs->nimages - 1); gfi = 0; goto done; } gfc.gfi = gfi = Gif_NewImage(); if (!gfi) goto done; break; case ';': /* terminator */ GIF_DEBUG(("term\n")); goto done; case '!': /* extension */ block = gifgetbyte(grr); GIF_DEBUG(("ext(0x%02X) ", block)); switch (block) { case 0xF9: read_graphic_control_extension(&gfc, gfi, grr); break; case 0xCE: last_name = suck_data(last_name, 0, grr); break; case 0xFE: if (!read_comment_extension(gfi, grr)) goto done; break; case 0xFF: read_application_extension(&gfc, grr); break; default: read_unknown_extension(&gfc, grr, block, 0, 0); break; } break; default: if (!unknown_block_type) { char buf[256]; sprintf(buf, "unknown block type %d at file offset %u", block, grr->pos - 1); gif_read_error(&gfc, 1, buf); unknown_block_type = 1; } break; } } done: /* Move comments and extensions after last image into stream. */ if (gfs && gfi) { Gif_Extension* gfex; gfs->end_comment = gfi->comment; gfi->comment = 0; gfs->end_extension_list = gfi->extension_list; gfi->extension_list = 0; for (gfex = gfs->end_extension_list; gfex; gfex = gfex->next) gfex->image = NULL; } Gif_DeleteImage(gfi); Gif_DeleteArray(last_name); Gif_DeleteArray(gfc.prefix); Gif_DeleteArray(gfc.suffix); Gif_DeleteArray(gfc.length); gfc.gfi = 0; if (gfs) gfs->errors = gfc.errors[1]; if (gfs && gfc.errors[1] == 0 && !(read_flags & GIF_READ_TRAILING_GARBAGE_OK) && !grr->eofer(grr)) gif_read_error(&gfc, 0, "trailing garbage after GIF ignored"); /* finally, export last message */ gif_read_error(&gfc, -1, 0); return gfs; } Commit Message: gif_read: Set last_name = NULL unconditionally. With a non-malicious GIF, last_name is set to NULL when a name extension is followed by an image. Reported in #117, via Debian, via a KAIST fuzzing program. CWE ID: CWE-415
read_gif(Gif_Reader *grr, int read_flags, const char* landmark, Gif_ReadErrorHandler handler) { Gif_Stream *gfs; Gif_Image *gfi; Gif_Context gfc; int unknown_block_type = 0; if (gifgetc(grr) != 'G' || gifgetc(grr) != 'I' || gifgetc(grr) != 'F') return 0; (void)gifgetc(grr); (void)gifgetc(grr); (void)gifgetc(grr); gfs = Gif_NewStream(); gfi = Gif_NewImage(); gfc.stream = gfs; gfc.prefix = Gif_NewArray(Gif_Code, GIF_MAX_CODE); gfc.suffix = Gif_NewArray(uint8_t, GIF_MAX_CODE); gfc.length = Gif_NewArray(uint16_t, GIF_MAX_CODE); gfc.handler = handler; gfc.gfi = gfi; gfc.errors[0] = gfc.errors[1] = 0; if (!gfs || !gfi || !gfc.prefix || !gfc.suffix || !gfc.length) goto done; gfs->landmark = landmark; GIF_DEBUG(("\nGIF ")); if (!read_logical_screen_descriptor(gfs, grr)) goto done; GIF_DEBUG(("logscrdesc ")); while (!gifeof(grr)) { uint8_t block = gifgetbyte(grr); switch (block) { case ',': /* image block */ GIF_DEBUG(("imageread %d ", gfs->nimages)); gfi->identifier = last_name; last_name = 0; if (!Gif_AddImage(gfs, gfi)) goto done; else if (!read_image(grr, &gfc, gfi, read_flags)) { Gif_RemoveImage(gfs, gfs->nimages - 1); gfi = 0; goto done; } gfc.gfi = gfi = Gif_NewImage(); if (!gfi) goto done; break; case ';': /* terminator */ GIF_DEBUG(("term\n")); goto done; case '!': /* extension */ block = gifgetbyte(grr); GIF_DEBUG(("ext(0x%02X) ", block)); switch (block) { case 0xF9: read_graphic_control_extension(&gfc, gfi, grr); break; case 0xCE: last_name = suck_data(last_name, 0, grr); break; case 0xFE: if (!read_comment_extension(gfi, grr)) goto done; break; case 0xFF: read_application_extension(&gfc, grr); break; default: read_unknown_extension(&gfc, grr, block, 0, 0); break; } break; default: if (!unknown_block_type) { char buf[256]; sprintf(buf, "unknown block type %d at file offset %u", block, grr->pos - 1); gif_read_error(&gfc, 1, buf); unknown_block_type = 1; } break; } } done: /* Move comments and extensions after last image into stream. */ if (gfs && gfi) { Gif_Extension* gfex; gfs->end_comment = gfi->comment; gfi->comment = 0; gfs->end_extension_list = gfi->extension_list; gfi->extension_list = 0; for (gfex = gfs->end_extension_list; gfex; gfex = gfex->next) gfex->image = NULL; } Gif_DeleteImage(gfi); Gif_DeleteArray(last_name); Gif_DeleteArray(gfc.prefix); Gif_DeleteArray(gfc.suffix); Gif_DeleteArray(gfc.length); gfc.gfi = 0; last_name = 0; if (gfs) gfs->errors = gfc.errors[1]; if (gfs && gfc.errors[1] == 0 && !(read_flags & GIF_READ_TRAILING_GARBAGE_OK) && !grr->eofer(grr)) gif_read_error(&gfc, 0, "trailing garbage after GIF ignored"); /* finally, export last message */ gif_read_error(&gfc, -1, 0); return gfs; }
169,420
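The one-line change above (clearing last_name after it is freed) is the classic free-then-NULL defence against CWE-415. A minimal standalone C sketch of the same pattern, with a simplified, hypothetical module-level pointer standing in for gifread's file-scope last_name:

```c
#include <stdlib.h>
#include <string.h>

/*
 * Simplified scratch pointer standing in for gifread's file-scope
 * 'last_name'; the flow is illustrative, not the real library.
 */
static char *last_name = NULL;

static void remember_name(const char *s)
{
    free(last_name);            /* drop any previous value */
    last_name = strdup(s);
}

static void finish_read(void)
{
    free(last_name);
    /*
     * Clearing the pointer right after freeing it is the essence of the
     * fix: without it, a later cleanup frees the same stale pointer again
     * (CWE-415, double free).
     */
    last_name = NULL;
}

int main(void)
{
    remember_name("frame-1");
    finish_read();
    finish_read();              /* now harmless: free(NULL) is a no-op */
    return 0;
}
```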
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int inode_change_ok(const struct inode *inode, struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; /* * First check size constraints. These can't be overriden using * ATTR_FORCE. */ if (ia_valid & ATTR_SIZE) { int error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; } /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) return 0; /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && (!uid_eq(current_fsuid(), inode->i_uid) || !uid_eq(attr->ia_uid, inode->i_uid)) && !inode_capable(inode, CAP_CHOWN)) return -EPERM; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && (!uid_eq(current_fsuid(), inode->i_uid) || (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) && !inode_capable(inode, CAP_CHOWN)) return -EPERM; /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { if (!inode_owner_or_capable(inode)) return -EPERM; /* Also check the setgid bit! */ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : inode->i_gid) && !inode_capable(inode, CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } /* Check for setting the inode time. */ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) { if (!inode_owner_or_capable(inode)) return -EPERM; } return 0; } Commit Message: fs,userns: Change inode_capable to capable_wrt_inode_uidgid The kernel has no concept of capabilities with respect to inodes; inodes exist independently of namespaces. For example, inode_capable(inode, CAP_LINUX_IMMUTABLE) would be nonsense. This patch changes inode_capable to check for uid and gid mappings and renames it to capable_wrt_inode_uidgid, which should make it more obvious what it does. Fixes CVE-2014-4014. Cc: Theodore Ts'o <[email protected]> Cc: Serge Hallyn <[email protected]> Cc: "Eric W. Biederman" <[email protected]> Cc: Dave Chinner <[email protected]> Cc: [email protected] Signed-off-by: Andy Lutomirski <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-264
int inode_change_ok(const struct inode *inode, struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; /* * First check size constraints. These can't be overriden using * ATTR_FORCE. */ if (ia_valid & ATTR_SIZE) { int error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; } /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) return 0; /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && (!uid_eq(current_fsuid(), inode->i_uid) || !uid_eq(attr->ia_uid, inode->i_uid)) && !capable_wrt_inode_uidgid(inode, CAP_CHOWN)) return -EPERM; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && (!uid_eq(current_fsuid(), inode->i_uid) || (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) && !capable_wrt_inode_uidgid(inode, CAP_CHOWN)) return -EPERM; /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { if (!inode_owner_or_capable(inode)) return -EPERM; /* Also check the setgid bit! */ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : inode->i_gid) && !capable_wrt_inode_uidgid(inode, CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } /* Check for setting the inode time. */ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) { if (!inode_owner_or_capable(inode)) return -EPERM; } return 0; }
166,317
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: process_add_smartcard_key(SocketEntry *e) { char *provider = NULL, *pin; int r, i, version, count = 0, success = 0, confirm = 0; u_int seconds; time_t death = 0; u_char type; struct sshkey **keys = NULL, *k; Identity *id; Idtab *tab; if ((r = sshbuf_get_cstring(e->request, &provider, NULL)) != 0 || (r = sshbuf_get_cstring(e->request, &pin, NULL)) != 0) fatal("%s: buffer error: %s", __func__, ssh_err(r)); while (sshbuf_len(e->request)) { if ((r = sshbuf_get_u8(e->request, &type)) != 0) fatal("%s: buffer error: %s", __func__, ssh_err(r)); switch (type) { case SSH_AGENT_CONSTRAIN_LIFETIME: if ((r = sshbuf_get_u32(e->request, &seconds)) != 0) fatal("%s: buffer error: %s", __func__, ssh_err(r)); death = monotime() + seconds; break; case SSH_AGENT_CONSTRAIN_CONFIRM: confirm = 1; break; default: error("process_add_smartcard_key: " "Unknown constraint type %d", type); goto send; } } if (lifetime && !death) death = monotime() + lifetime; count = pkcs11_add_provider(provider, pin, &keys); for (i = 0; i < count; i++) { k = keys[i]; version = k->type == KEY_RSA1 ? 1 : 2; tab = idtab_lookup(version); if (lookup_identity(k, version) == NULL) { id = xcalloc(1, sizeof(Identity)); id->key = k; id->provider = xstrdup(provider); id->comment = xstrdup(provider); /* XXX */ id->death = death; id->confirm = confirm; TAILQ_INSERT_TAIL(&tab->idlist, id, next); tab->nentries++; success = 1; } else { sshkey_free(k); } keys[i] = NULL; } send: free(pin); free(provider); free(keys); send_status(e, success); } Commit Message: add a whitelist of paths from which ssh-agent will load (via ssh-pkcs11-helper) a PKCS#11 module; ok markus@ CWE ID: CWE-426
process_add_smartcard_key(SocketEntry *e) { char *provider = NULL, *pin, canonical_provider[PATH_MAX]; int r, i, version, count = 0, success = 0, confirm = 0; u_int seconds; time_t death = 0; u_char type; struct sshkey **keys = NULL, *k; Identity *id; Idtab *tab; if ((r = sshbuf_get_cstring(e->request, &provider, NULL)) != 0 || (r = sshbuf_get_cstring(e->request, &pin, NULL)) != 0) fatal("%s: buffer error: %s", __func__, ssh_err(r)); while (sshbuf_len(e->request)) { if ((r = sshbuf_get_u8(e->request, &type)) != 0) fatal("%s: buffer error: %s", __func__, ssh_err(r)); switch (type) { case SSH_AGENT_CONSTRAIN_LIFETIME: if ((r = sshbuf_get_u32(e->request, &seconds)) != 0) fatal("%s: buffer error: %s", __func__, ssh_err(r)); death = monotime() + seconds; break; case SSH_AGENT_CONSTRAIN_CONFIRM: confirm = 1; break; default: error("process_add_smartcard_key: " "Unknown constraint type %d", type); goto send; } } if (realpath(provider, canonical_provider) == NULL) { verbose("failed PKCS#11 add of \"%.100s\": realpath: %s", provider, strerror(errno)); goto send; } if (match_pattern_list(canonical_provider, pkcs11_whitelist, 0) != 1) { verbose("refusing PKCS#11 add of \"%.100s\": " "provider not whitelisted", canonical_provider); goto send; } debug("%s: add %.100s", __func__, canonical_provider); if (lifetime && !death) death = monotime() + lifetime; count = pkcs11_add_provider(canonical_provider, pin, &keys); for (i = 0; i < count; i++) { k = keys[i]; version = k->type == KEY_RSA1 ? 1 : 2; tab = idtab_lookup(version); if (lookup_identity(k, version) == NULL) { id = xcalloc(1, sizeof(Identity)); id->key = k; id->provider = xstrdup(canonical_provider); id->comment = xstrdup(canonical_provider); /* XXX */ id->death = death; id->confirm = confirm; TAILQ_INSERT_TAIL(&tab->idlist, id, next); tab->nentries++; success = 1; } else { sshkey_free(k); } keys[i] = NULL; } send: free(pin); free(provider); free(keys); send_status(e, success); }
168,664
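The fix above canonicalizes the requested provider path with realpath() and only then matches it against a whitelist, so relative paths and symlinks cannot bypass the check. A minimal sketch of that canonicalize-then-allowlist idea using POSIX realpath() and fnmatch(); the pattern list and helper name are made up, and OpenSSH's internal match_pattern_list() is not reproduced here:

```c
#include <fnmatch.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Made-up allowlist; the real agent takes its patterns from a command-line
 * option and matches them with OpenSSH's internal match_pattern_list(). */
static const char *allowed_patterns[] = { "/usr/lib/*", "/usr/local/lib/*" };

/* Canonicalize first, then match: "../" tricks and symlinks are resolved
 * away before the allowlist is consulted, so an attacker-controlled path
 * cannot smuggle in an arbitrary shared object (CWE-426). */
static int provider_allowed(const char *provider)
{
    char canonical[PATH_MAX];
    size_t i;

    if (realpath(provider, canonical) == NULL)
        return 0;                               /* refuse on any failure */
    for (i = 0; i < sizeof(allowed_patterns) / sizeof(allowed_patterns[0]); i++)
        if (fnmatch(allowed_patterns[i], canonical, 0) == 0)
            return 1;
    return 0;
}

int main(int argc, char **argv)
{
    const char *provider = argc > 1 ? argv[1] : "/usr/lib/opensc-pkcs11.so";
    printf("%s: %s\n", provider,
           provider_allowed(provider) ? "allowed" : "refused");
    return 0;
}
```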
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: png_read_row(png_structp png_ptr, png_bytep row, png_bytep dsp_row) { PNG_CONST PNG_IDAT; PNG_CONST int png_pass_dsp_mask[7] = {0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff}; PNG_CONST int png_pass_mask[7] = {0x80, 0x08, 0x88, 0x22, 0xaa, 0x55, 0xff}; int ret; if (png_ptr == NULL) return; png_debug2(1, "in png_read_row (row %lu, pass %d)", png_ptr->row_number, png_ptr->pass); if (!(png_ptr->flags & PNG_FLAG_ROW_INIT)) png_read_start_row(png_ptr); if (png_ptr->row_number == 0 && png_ptr->pass == 0) { /* Check for transforms that have been set but were defined out */ #if defined(PNG_WRITE_INVERT_SUPPORTED) && !defined(PNG_READ_INVERT_SUPPORTED) if (png_ptr->transformations & PNG_INVERT_MONO) png_warning(png_ptr, "PNG_READ_INVERT_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_FILLER_SUPPORTED) && !defined(PNG_READ_FILLER_SUPPORTED) if (png_ptr->transformations & PNG_FILLER) png_warning(png_ptr, "PNG_READ_FILLER_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_PACKSWAP_SUPPORTED) && \ !defined(PNG_READ_PACKSWAP_SUPPORTED) if (png_ptr->transformations & PNG_PACKSWAP) png_warning(png_ptr, "PNG_READ_PACKSWAP_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_PACK_SUPPORTED) && !defined(PNG_READ_PACK_SUPPORTED) if (png_ptr->transformations & PNG_PACK) png_warning(png_ptr, "PNG_READ_PACK_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_SHIFT_SUPPORTED) && !defined(PNG_READ_SHIFT_SUPPORTED) if (png_ptr->transformations & PNG_SHIFT) png_warning(png_ptr, "PNG_READ_SHIFT_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_BGR_SUPPORTED) && !defined(PNG_READ_BGR_SUPPORTED) if (png_ptr->transformations & PNG_BGR) png_warning(png_ptr, "PNG_READ_BGR_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_SWAP_SUPPORTED) && !defined(PNG_READ_SWAP_SUPPORTED) if (png_ptr->transformations & PNG_SWAP_BYTES) png_warning(png_ptr, "PNG_READ_SWAP_SUPPORTED is not defined."); #endif } #ifdef PNG_READ_INTERLACING_SUPPORTED /* If interlaced and we do not need a new row, combine row and return */ if (png_ptr->interlaced && (png_ptr->transformations & PNG_INTERLACE)) { switch (png_ptr->pass) { case 0: if (png_ptr->row_number & 0x07) { if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 1: if ((png_ptr->row_number & 0x07) || png_ptr->width < 5) { if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 2: if ((png_ptr->row_number & 0x07) != 4) { if (dsp_row != NULL && (png_ptr->row_number & 4)) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 3: if ((png_ptr->row_number & 3) || png_ptr->width < 3) { if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 4: if ((png_ptr->row_number & 3) != 2) { if (dsp_row != NULL && (png_ptr->row_number & 2)) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 5: if ((png_ptr->row_number & 1) || png_ptr->width < 2) { if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 6: if (!(png_ptr->row_number & 1)) { png_read_finish_row(png_ptr); return; } break; } } #endif if (!(png_ptr->mode & PNG_HAVE_IDAT)) png_error(png_ptr, "Invalid attempt to read row data"); 
png_ptr->zstream.next_out = png_ptr->row_buf; png_ptr->zstream.avail_out = (uInt)(PNG_ROWBYTES(png_ptr->pixel_depth, png_ptr->iwidth) + 1); do { if (!(png_ptr->zstream.avail_in)) { while (!png_ptr->idat_size) { png_crc_finish(png_ptr, 0); png_ptr->idat_size = png_read_chunk_header(png_ptr); if (png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) png_error(png_ptr, "Not enough image data"); } png_ptr->zstream.avail_in = (uInt)png_ptr->zbuf_size; png_ptr->zstream.next_in = png_ptr->zbuf; if (png_ptr->zbuf_size > png_ptr->idat_size) png_ptr->zstream.avail_in = (uInt)png_ptr->idat_size; png_crc_read(png_ptr, png_ptr->zbuf, (png_size_t)png_ptr->zstream.avail_in); png_ptr->idat_size -= png_ptr->zstream.avail_in; } ret = inflate(&png_ptr->zstream, Z_PARTIAL_FLUSH); if (ret == Z_STREAM_END) { if (png_ptr->zstream.avail_out || png_ptr->zstream.avail_in || png_ptr->idat_size) png_error(png_ptr, "Extra compressed data"); png_ptr->mode |= PNG_AFTER_IDAT; png_ptr->flags |= PNG_FLAG_ZLIB_FINISHED; break; } if (ret != Z_OK) png_error(png_ptr, png_ptr->zstream.msg ? png_ptr->zstream.msg : "Decompression error"); } while (png_ptr->zstream.avail_out); png_ptr->row_info.color_type = png_ptr->color_type; png_ptr->row_info.width = png_ptr->iwidth; png_ptr->row_info.channels = png_ptr->channels; png_ptr->row_info.bit_depth = png_ptr->bit_depth; png_ptr->row_info.pixel_depth = png_ptr->pixel_depth; png_ptr->row_info.rowbytes = PNG_ROWBYTES(png_ptr->row_info.pixel_depth, png_ptr->row_info.width); if (png_ptr->row_buf[0]) png_read_filter_row(png_ptr, &(png_ptr->row_info), png_ptr->row_buf + 1, png_ptr->prev_row + 1, (int)(png_ptr->row_buf[0])); png_memcpy_check(png_ptr, png_ptr->prev_row, png_ptr->row_buf, png_ptr->rowbytes + 1); #ifdef PNG_MNG_FEATURES_SUPPORTED if ((png_ptr->mng_features_permitted & PNG_FLAG_MNG_FILTER_64) && (png_ptr->filter_type == PNG_INTRAPIXEL_DIFFERENCING)) { /* Intrapixel differencing */ png_do_read_intrapixel(&(png_ptr->row_info), png_ptr->row_buf + 1); } #endif if (png_ptr->transformations || (png_ptr->flags&PNG_FLAG_STRIP_ALPHA)) png_do_read_transformations(png_ptr); #ifdef PNG_READ_INTERLACING_SUPPORTED /* Blow up interlaced rows to full size */ if (png_ptr->interlaced && (png_ptr->transformations & PNG_INTERLACE)) { if (png_ptr->pass < 6) /* Old interface (pre-1.0.9): * png_do_read_interlace(&(png_ptr->row_info), * png_ptr->row_buf + 1, png_ptr->pass, png_ptr->transformations); */ png_do_read_interlace(png_ptr); if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); if (row != NULL) png_combine_row(png_ptr, row, png_pass_mask[png_ptr->pass]); } else #endif { if (row != NULL) png_combine_row(png_ptr, row, 0xff); if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, 0xff); } png_read_finish_row(png_ptr); if (png_ptr->read_row_fn != NULL) (*(png_ptr->read_row_fn))(png_ptr, png_ptr->row_number, png_ptr->pass); } Commit Message: third_party/libpng: update to 1.2.54 [email protected] BUG=560291 Review URL: https://codereview.chromium.org/1467263003 Cr-Commit-Position: refs/heads/master@{#362298} CWE ID: CWE-119
png_read_row(png_structp png_ptr, png_bytep row, png_bytep dsp_row) { #ifndef PNG_USE_GLOBAL_ARRAYS PNG_CONST PNG_IDAT; PNG_CONST int png_pass_dsp_mask[7] = {0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff}; PNG_CONST int png_pass_mask[7] = {0x80, 0x08, 0x88, 0x22, 0xaa, 0x55, 0xff}; #endif int ret; if (png_ptr == NULL) return; png_debug2(1, "in png_read_row (row %lu, pass %d)", png_ptr->row_number, png_ptr->pass); if (!(png_ptr->flags & PNG_FLAG_ROW_INIT)) png_read_start_row(png_ptr); if (png_ptr->row_number == 0 && png_ptr->pass == 0) { /* Check for transforms that have been set but were defined out */ #if defined(PNG_WRITE_INVERT_SUPPORTED) && !defined(PNG_READ_INVERT_SUPPORTED) if (png_ptr->transformations & PNG_INVERT_MONO) png_warning(png_ptr, "PNG_READ_INVERT_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_FILLER_SUPPORTED) && !defined(PNG_READ_FILLER_SUPPORTED) if (png_ptr->transformations & PNG_FILLER) png_warning(png_ptr, "PNG_READ_FILLER_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_PACKSWAP_SUPPORTED) && \ !defined(PNG_READ_PACKSWAP_SUPPORTED) if (png_ptr->transformations & PNG_PACKSWAP) png_warning(png_ptr, "PNG_READ_PACKSWAP_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_PACK_SUPPORTED) && !defined(PNG_READ_PACK_SUPPORTED) if (png_ptr->transformations & PNG_PACK) png_warning(png_ptr, "PNG_READ_PACK_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_SHIFT_SUPPORTED) && !defined(PNG_READ_SHIFT_SUPPORTED) if (png_ptr->transformations & PNG_SHIFT) png_warning(png_ptr, "PNG_READ_SHIFT_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_BGR_SUPPORTED) && !defined(PNG_READ_BGR_SUPPORTED) if (png_ptr->transformations & PNG_BGR) png_warning(png_ptr, "PNG_READ_BGR_SUPPORTED is not defined."); #endif #if defined(PNG_WRITE_SWAP_SUPPORTED) && !defined(PNG_READ_SWAP_SUPPORTED) if (png_ptr->transformations & PNG_SWAP_BYTES) png_warning(png_ptr, "PNG_READ_SWAP_SUPPORTED is not defined."); #endif } #ifdef PNG_READ_INTERLACING_SUPPORTED /* If interlaced and we do not need a new row, combine row and return */ if (png_ptr->interlaced && (png_ptr->transformations & PNG_INTERLACE)) { switch (png_ptr->pass) { case 0: if (png_ptr->row_number & 0x07) { if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 1: if ((png_ptr->row_number & 0x07) || png_ptr->width < 5) { if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 2: if ((png_ptr->row_number & 0x07) != 4) { if (dsp_row != NULL && (png_ptr->row_number & 4)) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 3: if ((png_ptr->row_number & 3) || png_ptr->width < 3) { if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 4: if ((png_ptr->row_number & 3) != 2) { if (dsp_row != NULL && (png_ptr->row_number & 2)) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 5: if ((png_ptr->row_number & 1) || png_ptr->width < 2) { if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); png_read_finish_row(png_ptr); return; } break; case 6: if (!(png_ptr->row_number & 1)) { png_read_finish_row(png_ptr); return; } break; } } #endif if (!(png_ptr->mode & PNG_HAVE_IDAT)) png_error(png_ptr, "Invalid 
attempt to read row data"); png_ptr->zstream.next_out = png_ptr->row_buf; png_ptr->zstream.avail_out = (uInt)(PNG_ROWBYTES(png_ptr->pixel_depth, png_ptr->iwidth) + 1); do { if (!(png_ptr->zstream.avail_in)) { while (!png_ptr->idat_size) { png_crc_finish(png_ptr, 0); png_ptr->idat_size = png_read_chunk_header(png_ptr); if (png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) png_error(png_ptr, "Not enough image data"); } png_ptr->zstream.avail_in = (uInt)png_ptr->zbuf_size; png_ptr->zstream.next_in = png_ptr->zbuf; if (png_ptr->zbuf_size > png_ptr->idat_size) png_ptr->zstream.avail_in = (uInt)png_ptr->idat_size; png_crc_read(png_ptr, png_ptr->zbuf, (png_size_t)png_ptr->zstream.avail_in); png_ptr->idat_size -= png_ptr->zstream.avail_in; } ret = inflate(&png_ptr->zstream, Z_PARTIAL_FLUSH); if (ret == Z_STREAM_END) { if (png_ptr->zstream.avail_out || png_ptr->zstream.avail_in || png_ptr->idat_size) png_error(png_ptr, "Extra compressed data"); png_ptr->mode |= PNG_AFTER_IDAT; png_ptr->flags |= PNG_FLAG_ZLIB_FINISHED; break; } if (ret != Z_OK) png_error(png_ptr, png_ptr->zstream.msg ? png_ptr->zstream.msg : "Decompression error"); } while (png_ptr->zstream.avail_out); png_ptr->row_info.color_type = png_ptr->color_type; png_ptr->row_info.width = png_ptr->iwidth; png_ptr->row_info.channels = png_ptr->channels; png_ptr->row_info.bit_depth = png_ptr->bit_depth; png_ptr->row_info.pixel_depth = png_ptr->pixel_depth; png_ptr->row_info.rowbytes = PNG_ROWBYTES(png_ptr->row_info.pixel_depth, png_ptr->row_info.width); if (png_ptr->row_buf[0]) png_read_filter_row(png_ptr, &(png_ptr->row_info), png_ptr->row_buf + 1, png_ptr->prev_row + 1, (int)(png_ptr->row_buf[0])); png_memcpy_check(png_ptr, png_ptr->prev_row, png_ptr->row_buf, png_ptr->rowbytes + 1); #ifdef PNG_MNG_FEATURES_SUPPORTED if ((png_ptr->mng_features_permitted & PNG_FLAG_MNG_FILTER_64) && (png_ptr->filter_type == PNG_INTRAPIXEL_DIFFERENCING)) { /* Intrapixel differencing */ png_do_read_intrapixel(&(png_ptr->row_info), png_ptr->row_buf + 1); } #endif if (png_ptr->transformations || (png_ptr->flags&PNG_FLAG_STRIP_ALPHA)) png_do_read_transformations(png_ptr); #ifdef PNG_READ_INTERLACING_SUPPORTED /* Blow up interlaced rows to full size */ if (png_ptr->interlaced && (png_ptr->transformations & PNG_INTERLACE)) { if (png_ptr->pass < 6) /* Old interface (pre-1.0.9): * png_do_read_interlace(&(png_ptr->row_info), * png_ptr->row_buf + 1, png_ptr->pass, png_ptr->transformations); */ png_do_read_interlace(png_ptr); if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, png_pass_dsp_mask[png_ptr->pass]); if (row != NULL) png_combine_row(png_ptr, row, png_pass_mask[png_ptr->pass]); } else #endif { if (row != NULL) png_combine_row(png_ptr, row, 0xff); if (dsp_row != NULL) png_combine_row(png_ptr, dsp_row, 0xff); } png_read_finish_row(png_ptr); if (png_ptr->read_row_fn != NULL) (*(png_ptr->read_row_fn))(png_ptr, png_ptr->row_number, png_ptr->pass); }
172,170
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: pcf_get_encodings( FT_Stream stream, PCF_Face face ) { FT_Error error; FT_Memory memory = FT_FACE( face )->memory; FT_ULong format, size; int firstCol, lastCol; int firstRow, lastRow; int nencoding, encodingOffset; int i, j, k; PCF_Encoding encoding = NULL; error = pcf_seek_to_table_type( stream, face->toc.tables, face->toc.count, PCF_BDF_ENCODINGS, &format, &size ); if ( error ) return error; error = FT_Stream_EnterFrame( stream, 14 ); if ( error ) return error; format = FT_GET_ULONG_LE(); if ( PCF_BYTE_ORDER( format ) == MSBFirst ) { firstCol = FT_GET_SHORT(); lastCol = FT_GET_SHORT(); firstRow = FT_GET_SHORT(); lastRow = FT_GET_SHORT(); face->defaultChar = FT_GET_SHORT(); } else { firstCol = FT_GET_SHORT_LE(); lastCol = FT_GET_SHORT_LE(); firstRow = FT_GET_SHORT_LE(); lastRow = FT_GET_SHORT_LE(); face->defaultChar = FT_GET_SHORT_LE(); } FT_Stream_ExitFrame( stream ); if ( !PCF_FORMAT_MATCH( format, PCF_DEFAULT_FORMAT ) ) return FT_THROW( Invalid_File_Format ); FT_TRACE4(( "pdf_get_encodings:\n" )); FT_TRACE4(( " firstCol %d, lastCol %d, firstRow %d, lastRow %d\n", goto Bail; k = 0; for ( i = firstRow; i <= lastRow; i++ ) { for ( j = firstCol; j <= lastCol; j++ ) { if ( PCF_BYTE_ORDER( format ) == MSBFirst ) encodingOffset = FT_GET_SHORT(); else encodingOffset = FT_GET_SHORT_LE(); if ( encodingOffset != -1 ) { encoding[k].enc = i * 256 + j; encoding[k].glyph = (FT_Short)encodingOffset; FT_TRACE5(( " code %d (0x%04X): idx %d\n", encoding[k].enc, encoding[k].enc, encoding[k].glyph )); k++; } } } FT_Stream_ExitFrame( stream ); if ( FT_RENEW_ARRAY( encoding, nencoding, k ) ) goto Bail; face->nencodings = k; face->encodings = encoding; return error; Bail: FT_FREE( encoding ); return error; } Commit Message: CWE ID: CWE-189
pcf_get_encodings( FT_Stream stream, PCF_Face face ) { FT_Error error; FT_Memory memory = FT_FACE( face )->memory; FT_ULong format, size; int firstCol, lastCol; int firstRow, lastRow; int nencoding, encodingOffset; int i, j, k; PCF_Encoding encoding = NULL; error = pcf_seek_to_table_type( stream, face->toc.tables, face->toc.count, PCF_BDF_ENCODINGS, &format, &size ); if ( error ) return error; error = FT_Stream_EnterFrame( stream, 14 ); if ( error ) return error; format = FT_GET_ULONG_LE(); if ( PCF_BYTE_ORDER( format ) == MSBFirst ) { firstCol = FT_GET_SHORT(); lastCol = FT_GET_SHORT(); firstRow = FT_GET_SHORT(); lastRow = FT_GET_SHORT(); face->defaultChar = FT_GET_SHORT(); } else { firstCol = FT_GET_SHORT_LE(); lastCol = FT_GET_SHORT_LE(); firstRow = FT_GET_SHORT_LE(); lastRow = FT_GET_SHORT_LE(); face->defaultChar = FT_GET_SHORT_LE(); } FT_Stream_ExitFrame( stream ); if ( !PCF_FORMAT_MATCH( format, PCF_DEFAULT_FORMAT ) ) return FT_THROW( Invalid_File_Format ); /* sanity checks */ if ( firstCol < 0 || firstCol > lastCol || lastCol > 0xFF || firstRow < 0 || firstRow > lastRow || lastRow > 0xFF ) return FT_THROW( Invalid_Table ); FT_TRACE4(( "pdf_get_encodings:\n" )); FT_TRACE4(( " firstCol %d, lastCol %d, firstRow %d, lastRow %d\n", goto Bail; k = 0; for ( i = firstRow; i <= lastRow; i++ ) { for ( j = firstCol; j <= lastCol; j++ ) { if ( PCF_BYTE_ORDER( format ) == MSBFirst ) encodingOffset = FT_GET_SHORT(); else encodingOffset = FT_GET_SHORT_LE(); if ( encodingOffset != -1 ) { encoding[k].enc = i * 256 + j; encoding[k].glyph = (FT_Short)encodingOffset; FT_TRACE5(( " code %d (0x%04X): idx %d\n", encoding[k].enc, encoding[k].enc, encoding[k].glyph )); k++; } } } FT_Stream_ExitFrame( stream ); if ( FT_RENEW_ARRAY( encoding, nencoding, k ) ) goto Bail; face->nencodings = k; face->encodings = encoding; return error; Bail: FT_FREE( encoding ); return error; }
164,844
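The pcf_get_encodings fix above works by rejecting the row/column bounds before anything is sized or read from them (the nencoding count is derived from those bounds further down the function, in a portion of the body that is elided in this record). The following is a minimal, hypothetical sketch of that validation pattern in isolation; the parameter names and the 0xFF caps mirror the fixed code, everything else is invented for illustration and is not FreeType API.

/*
 * Hypothetical helper: validate PCF-style encoding bounds before they
 * are used to size anything.
 */
int check_encoding_bounds( int first_col, int last_col,
                           int first_row, int last_row,
                           long *n_encodings )
{
  /* Reject negative, inverted or out-of-range spans up front. */
  if ( first_col < 0 || first_col > last_col || last_col > 0xFF ||
       first_row < 0 || first_row > last_row || last_row > 0xFF )
    return -1;

  /* With both spans capped at 256 entries, this product cannot
     overflow a 32-bit value.                                    */
  *n_encodings = (long)( last_col - first_col + 1 ) *
                 (long)( last_row - first_row + 1 );
  return 0;
}

Checking both ordering and range in one place means the later multiplication cannot overflow and the later per-entry reads cannot run past the table.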
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void Huff_Compress(msg_t *mbuf, int offset) { int i, ch, size; byte seq[65536]; byte* buffer; huff_t huff; size = mbuf->cursize - offset; buffer = mbuf->data+ + offset; if (size<=0) { return; } Com_Memset(&huff, 0, sizeof(huff_t)); huff.tree = huff.lhead = huff.loc[NYT] = &(huff.nodeList[huff.blocNode++]); huff.tree->symbol = NYT; huff.tree->weight = 0; huff.lhead->next = huff.lhead->prev = NULL; huff.tree->parent = huff.tree->left = huff.tree->right = NULL; seq[0] = (size>>8); seq[1] = size&0xff; bloc = 16; for (i=0; i<size; i++ ) { ch = buffer[i]; Huff_transmit(&huff, ch, seq); /* Transmit symbol */ Huff_addRef(&huff, (byte)ch); /* Do update */ } bloc += 8; // next byte mbuf->cursize = (bloc>>3) + offset; Com_Memcpy(mbuf->data+offset, seq, (bloc>>3)); } Commit Message: Fix/improve buffer overflow in MSG_ReadBits/MSG_WriteBits Prevent reading past end of message in MSG_ReadBits. If read past end of msg->data buffer (16348 bytes) the engine could SEGFAULT. Make MSG_WriteBits use an exact buffer overflow check instead of possibly failing with a few bytes left. CWE ID: CWE-119
void Huff_Compress(msg_t *mbuf, int offset) { int i, ch, size; byte seq[65536]; byte* buffer; huff_t huff; size = mbuf->cursize - offset; buffer = mbuf->data+ + offset; if (size<=0) { return; } Com_Memset(&huff, 0, sizeof(huff_t)); huff.tree = huff.lhead = huff.loc[NYT] = &(huff.nodeList[huff.blocNode++]); huff.tree->symbol = NYT; huff.tree->weight = 0; huff.lhead->next = huff.lhead->prev = NULL; huff.tree->parent = huff.tree->left = huff.tree->right = NULL; seq[0] = (size>>8); seq[1] = size&0xff; bloc = 16; for (i=0; i<size; i++ ) { ch = buffer[i]; Huff_transmit(&huff, ch, seq, size<<3); /* Transmit symbol */ Huff_addRef(&huff, (byte)ch); /* Do update */ } bloc += 8; // next byte mbuf->cursize = (bloc>>3) + offset; Com_Memcpy(mbuf->data+offset, seq, (bloc>>3)); }
167,993
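The Huff_Compress fix above passes the destination's capacity in bits (size << 3) down to Huff_transmit(), so the bit-level writer can stop exactly at the buffer boundary, which is what the commit means by an "exact buffer overflow check". Below is a rough, self-contained sketch of such a check; bitwriter_t and bw_put_bit() are hypothetical stand-ins, not the real MSG_WriteBits().

/* Hypothetical bit writer with an exact capacity check. */
typedef struct {
	unsigned char *buf;      /* destination buffer              */
	int            bloc;     /* current write position, in bits */
	int            maxbits;  /* capacity of buf, in bits        */
} bitwriter_t;

int bw_put_bit( bitwriter_t *bw, int bit ) {
	if ( bw->bloc >= bw->maxbits ) {
		return -1;           /* exact boundary: refuse the bit  */
	}
	if ( bit ) {
		bw->buf[bw->bloc >> 3] |= (unsigned char)( 1 << ( bw->bloc & 7 ) );
	} else {
		bw->buf[bw->bloc >> 3] &= (unsigned char)~( 1 << ( bw->bloc & 7 ) );
	}
	bw->bloc++;
	return 0;
}

Tracking the budget in bits rather than bytes is what makes the check exact: the writer can refuse the single bit that would cross the boundary instead of failing early or overrunning.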
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: error::Error GLES2DecoderImpl::HandleBeginQueryEXT( uint32_t immediate_data_size, const volatile void* cmd_data) { const volatile gles2::cmds::BeginQueryEXT& c = *static_cast<const volatile gles2::cmds::BeginQueryEXT*>(cmd_data); GLenum target = static_cast<GLenum>(c.target); GLuint client_id = static_cast<GLuint>(c.id); int32_t sync_shm_id = static_cast<int32_t>(c.sync_data_shm_id); uint32_t sync_shm_offset = static_cast<uint32_t>(c.sync_data_shm_offset); switch (target) { case GL_COMMANDS_ISSUED_CHROMIUM: case GL_LATENCY_QUERY_CHROMIUM: case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM: case GL_GET_ERROR_QUERY_CHROMIUM: break; case GL_READBACK_SHADOW_COPIES_UPDATED_CHROMIUM: case GL_COMMANDS_COMPLETED_CHROMIUM: if (!features().chromium_sync_query) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, "glBeginQueryEXT", "not enabled for commands completed queries"); return error::kNoError; } break; case GL_SAMPLES_PASSED_ARB: if (!features().occlusion_query) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, "glBeginQueryEXT", "not enabled for occlusion queries"); return error::kNoError; } break; case GL_ANY_SAMPLES_PASSED: case GL_ANY_SAMPLES_PASSED_CONSERVATIVE: if (!features().occlusion_query_boolean) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, "glBeginQueryEXT", "not enabled for boolean occlusion queries"); return error::kNoError; } break; case GL_TIME_ELAPSED: if (!query_manager_->GPUTimingAvailable()) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, "glBeginQueryEXT", "not enabled for timing queries"); return error::kNoError; } break; case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN: if (feature_info_->IsWebGL2OrES3Context()) { break; } FALLTHROUGH; default: LOCAL_SET_GL_ERROR( GL_INVALID_ENUM, "glBeginQueryEXT", "unknown query target"); return error::kNoError; } if (query_manager_->GetActiveQuery(target)) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, "glBeginQueryEXT", "query already in progress"); return error::kNoError; } if (client_id == 0) { LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT", "id is 0"); return error::kNoError; } scoped_refptr<gpu::Buffer> buffer = GetSharedMemoryBuffer(sync_shm_id); if (!buffer) return error::kInvalidArguments; QuerySync* sync = static_cast<QuerySync*>( buffer->GetDataAddress(sync_shm_offset, sizeof(QuerySync))); if (!sync) return error::kOutOfBounds; QueryManager::Query* query = query_manager_->GetQuery(client_id); if (!query) { if (!query_manager_->IsValidQuery(client_id)) { LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT", "id not made by glGenQueriesEXT"); return error::kNoError; } query = query_manager_->CreateQuery(target, client_id, std::move(buffer), sync); } else { if (query->target() != target) { LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT", "target does not match"); return error::kNoError; } else if (query->sync() != sync) { DLOG(ERROR) << "Shared memory used by query not the same as before"; return error::kInvalidArguments; } } query_manager_->BeginQuery(query); return error::kNoError; } Commit Message: Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM This makes the query of GL_COMPLETION_STATUS_KHR to programs much cheaper by minimizing the round-trip to the GPU thread. 
Bug: 881152, 957001 Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630 Commit-Queue: Kenneth Russell <[email protected]> Reviewed-by: Kentaro Hara <[email protected]> Reviewed-by: Geoff Lang <[email protected]> Reviewed-by: Kenneth Russell <[email protected]> Cr-Commit-Position: refs/heads/master@{#657568} CWE ID: CWE-416
error::Error GLES2DecoderImpl::HandleBeginQueryEXT( uint32_t immediate_data_size, const volatile void* cmd_data) { const volatile gles2::cmds::BeginQueryEXT& c = *static_cast<const volatile gles2::cmds::BeginQueryEXT*>(cmd_data); GLenum target = static_cast<GLenum>(c.target); GLuint client_id = static_cast<GLuint>(c.id); int32_t sync_shm_id = static_cast<int32_t>(c.sync_data_shm_id); uint32_t sync_shm_offset = static_cast<uint32_t>(c.sync_data_shm_offset); switch (target) { case GL_COMMANDS_ISSUED_CHROMIUM: case GL_LATENCY_QUERY_CHROMIUM: case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM: case GL_GET_ERROR_QUERY_CHROMIUM: break; case GL_READBACK_SHADOW_COPIES_UPDATED_CHROMIUM: case GL_COMMANDS_COMPLETED_CHROMIUM: if (!features().chromium_sync_query) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, "glBeginQueryEXT", "not enabled for commands completed queries"); return error::kNoError; } break; case GL_PROGRAM_COMPLETION_QUERY_CHROMIUM: if (!features().chromium_completion_query) { LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT", "not enabled for program completion queries"); return error::kNoError; } break; case GL_SAMPLES_PASSED_ARB: if (!features().occlusion_query) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, "glBeginQueryEXT", "not enabled for occlusion queries"); return error::kNoError; } break; case GL_ANY_SAMPLES_PASSED: case GL_ANY_SAMPLES_PASSED_CONSERVATIVE: if (!features().occlusion_query_boolean) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, "glBeginQueryEXT", "not enabled for boolean occlusion queries"); return error::kNoError; } break; case GL_TIME_ELAPSED: if (!query_manager_->GPUTimingAvailable()) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, "glBeginQueryEXT", "not enabled for timing queries"); return error::kNoError; } break; case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN: if (feature_info_->IsWebGL2OrES3Context()) { break; } FALLTHROUGH; default: LOCAL_SET_GL_ERROR( GL_INVALID_ENUM, "glBeginQueryEXT", "unknown query target"); return error::kNoError; } if (query_manager_->GetActiveQuery(target)) { LOCAL_SET_GL_ERROR( GL_INVALID_OPERATION, "glBeginQueryEXT", "query already in progress"); return error::kNoError; } if (client_id == 0) { LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT", "id is 0"); return error::kNoError; } scoped_refptr<gpu::Buffer> buffer = GetSharedMemoryBuffer(sync_shm_id); if (!buffer) return error::kInvalidArguments; QuerySync* sync = static_cast<QuerySync*>( buffer->GetDataAddress(sync_shm_offset, sizeof(QuerySync))); if (!sync) return error::kOutOfBounds; QueryManager::Query* query = query_manager_->GetQuery(client_id); if (!query) { if (!query_manager_->IsValidQuery(client_id)) { LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT", "id not made by glGenQueriesEXT"); return error::kNoError; } query = query_manager_->CreateQuery(target, client_id, std::move(buffer), sync); } else { if (query->target() != target) { LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT", "target does not match"); return error::kNoError; } else if (query->sync() != sync) { DLOG(ERROR) << "Shared memory used by query not the same as before"; return error::kInvalidArguments; } } query_manager_->BeginQuery(query); return error::kNoError; }
172,529
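The decoder fix above extends the target switch with GL_PROGRAM_COMPLETION_QUERY_CHROMIUM, accepted only when the corresponding capability is exposed, while every other unknown enum still falls through to an error. A tiny sketch of that capability-gated validation of an untrusted enum; all names here are made up and do not correspond to real Chromium types.

/* Hypothetical capability-gated validation of an untrusted enum. */
enum query_target { QT_COMMANDS_ISSUED, QT_PROGRAM_COMPLETION, QT_UNKNOWN };

struct caps { int has_program_completion_query; };

int validate_query_target(const struct caps *c, enum query_target t)
{
  switch (t) {
    case QT_COMMANDS_ISSUED:
      return 0;                 /* always available */
    case QT_PROGRAM_COMPLETION:
      /* New targets are accepted only when the capability is exposed,
         mirroring the features().chromium_completion_query check in
         the fixed handler above. */
      return c->has_program_completion_query ? 0 : -1;
    default:
      return -1;                /* unknown enums are always rejected */
  }
}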
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: xmlParseCommentComplex(xmlParserCtxtPtr ctxt, xmlChar *buf, int len, int size) { int q, ql; int r, rl; int cur, l; int count = 0; int inputid; inputid = ctxt->input->id; if (buf == NULL) { len = 0; size = XML_PARSER_BUFFER_SIZE; buf = (xmlChar *) xmlMallocAtomic(size * sizeof(xmlChar)); if (buf == NULL) { xmlErrMemory(ctxt, NULL); return; } } GROW; /* Assure there's enough input data */ q = CUR_CHAR(ql); if (q == 0) goto not_terminated; if (!IS_CHAR(q)) { xmlFatalErrMsgInt(ctxt, XML_ERR_INVALID_CHAR, "xmlParseComment: invalid xmlChar value %d\n", q); xmlFree (buf); return; } NEXTL(ql); r = CUR_CHAR(rl); if (r == 0) goto not_terminated; if (!IS_CHAR(r)) { xmlFatalErrMsgInt(ctxt, XML_ERR_INVALID_CHAR, "xmlParseComment: invalid xmlChar value %d\n", q); xmlFree (buf); return; } NEXTL(rl); cur = CUR_CHAR(l); if (cur == 0) goto not_terminated; while (IS_CHAR(cur) && /* checked */ ((cur != '>') || (r != '-') || (q != '-'))) { if ((r == '-') && (q == '-')) { xmlFatalErr(ctxt, XML_ERR_HYPHEN_IN_COMMENT, NULL); } if (len + 5 >= size) { xmlChar *new_buf; size *= 2; new_buf = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar)); if (new_buf == NULL) { xmlFree (buf); xmlErrMemory(ctxt, NULL); return; } buf = new_buf; } COPY_BUF(ql,buf,len,q); q = r; ql = rl; r = cur; rl = l; count++; if (count > 50) { GROW; count = 0; } NEXTL(l); cur = CUR_CHAR(l); if (cur == 0) { SHRINK; GROW; cur = CUR_CHAR(l); } } buf[len] = 0; if (cur == 0) { xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED, "Comment not terminated \n<!--%.50s\n", buf); } else if (!IS_CHAR(cur)) { xmlFatalErrMsgInt(ctxt, XML_ERR_INVALID_CHAR, "xmlParseComment: invalid xmlChar value %d\n", cur); } else { if (inputid != ctxt->input->id) { xmlFatalErrMsg(ctxt, XML_ERR_ENTITY_BOUNDARY, "Comment doesn't start and stop in the same entity\n"); } NEXT; if ((ctxt->sax != NULL) && (ctxt->sax->comment != NULL) && (!ctxt->disableSAX)) ctxt->sax->comment(ctxt->userData, buf); } xmlFree(buf); return; not_terminated: xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED, "Comment not terminated\n", NULL); xmlFree(buf); return; } Commit Message: libxml: XML_PARSER_EOF checks from upstream BUG=229019 TBR=cpu Review URL: https://chromiumcodereview.appspot.com/14053009 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@196804 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-119
xmlParseCommentComplex(xmlParserCtxtPtr ctxt, xmlChar *buf, int len, int size) { int q, ql; int r, rl; int cur, l; int count = 0; int inputid; inputid = ctxt->input->id; if (buf == NULL) { len = 0; size = XML_PARSER_BUFFER_SIZE; buf = (xmlChar *) xmlMallocAtomic(size * sizeof(xmlChar)); if (buf == NULL) { xmlErrMemory(ctxt, NULL); return; } } GROW; /* Assure there's enough input data */ q = CUR_CHAR(ql); if (q == 0) goto not_terminated; if (!IS_CHAR(q)) { xmlFatalErrMsgInt(ctxt, XML_ERR_INVALID_CHAR, "xmlParseComment: invalid xmlChar value %d\n", q); xmlFree (buf); return; } NEXTL(ql); r = CUR_CHAR(rl); if (r == 0) goto not_terminated; if (!IS_CHAR(r)) { xmlFatalErrMsgInt(ctxt, XML_ERR_INVALID_CHAR, "xmlParseComment: invalid xmlChar value %d\n", q); xmlFree (buf); return; } NEXTL(rl); cur = CUR_CHAR(l); if (cur == 0) goto not_terminated; while (IS_CHAR(cur) && /* checked */ ((cur != '>') || (r != '-') || (q != '-'))) { if ((r == '-') && (q == '-')) { xmlFatalErr(ctxt, XML_ERR_HYPHEN_IN_COMMENT, NULL); } if (len + 5 >= size) { xmlChar *new_buf; size *= 2; new_buf = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar)); if (new_buf == NULL) { xmlFree (buf); xmlErrMemory(ctxt, NULL); return; } buf = new_buf; } COPY_BUF(ql,buf,len,q); q = r; ql = rl; r = cur; rl = l; count++; if (count > 50) { GROW; count = 0; if (ctxt->instate == XML_PARSER_EOF) { xmlFree(buf); return; } } NEXTL(l); cur = CUR_CHAR(l); if (cur == 0) { SHRINK; GROW; cur = CUR_CHAR(l); } } buf[len] = 0; if (cur == 0) { xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED, "Comment not terminated \n<!--%.50s\n", buf); } else if (!IS_CHAR(cur)) { xmlFatalErrMsgInt(ctxt, XML_ERR_INVALID_CHAR, "xmlParseComment: invalid xmlChar value %d\n", cur); } else { if (inputid != ctxt->input->id) { xmlFatalErrMsg(ctxt, XML_ERR_ENTITY_BOUNDARY, "Comment doesn't start and stop in the same entity\n"); } NEXT; if ((ctxt->sax != NULL) && (ctxt->sax->comment != NULL) && (!ctxt->disableSAX)) ctxt->sax->comment(ctxt->userData, buf); } xmlFree(buf); return; not_terminated: xmlFatalErrMsgStr(ctxt, XML_ERR_COMMENT_NOT_FINISHED, "Comment not terminated\n", NULL); xmlFree(buf); return; }
171,279
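The libxml2 fix above re-checks the parser state immediately after the periodic GROW refill, freeing the partial buffer and bailing out once the input has hit EOF instead of continuing to scan. The skeleton below shows the same check-after-refill pattern with hypothetical types; refill() stands in for GROW and PARSER_EOF for XML_PARSER_EOF, and the actual character copying is left as a comment.

#include <stdlib.h>

enum parser_state { PARSER_OK, PARSER_EOF };

struct parser {
    enum parser_state state;   /* set to PARSER_EOF when input runs out */
    /* ... input window, cursors ... */
};

/* Hypothetical refill hook; in libxml2 this role is played by GROW. */
static void refill(struct parser *p) { (void)p; }

/* Skeleton of a scan loop that re-checks the abort flag after every
   refill, as the fixed xmlParseCommentComplex() now does. */
static char *scan_token(struct parser *p, int token_len)
{
    char *buf = (char *) malloc(64);
    int i, count;

    if (buf == NULL)
        return NULL;
    for (i = 0, count = 0; i < token_len; i++, count++) {
        if (count > 50) {
            refill(p);
            count = 0;
            if (p->state == PARSER_EOF) {   /* the added check         */
                free(buf);                  /* no leak, no stale reads */
                return NULL;
            }
        }
        /* ... copy one character of the token into buf ... */
    }
    return buf;
}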
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct tcp_sock *tp; struct sk_buff *opt_skb = NULL; /* Imagine: socket is IPv6. IPv4 packet arrives, goes to IPv4 receive handler and backlogged. From backlog it always goes here. Kerboom... Fortunately, tcp_rcv_established and rcv_established handle them correctly, but it is not case with tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK */ if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_do_rcv(sk, skb); if (sk_filter(sk, skb)) goto discard; /* * socket locking is here for SMP purposes as backlog rcv * is currently called with bh processing disabled. */ /* Do Stevens' IPV6_PKTOPTIONS. Yes, guys, it is the only place in our code, where we may make it not affecting IPv4. The rest of code is protocol independent, and I do not like idea to uglify IPv4. Actually, all the idea behind IPV6_PKTOPTIONS looks not very well thought. For now we latch options, received in the last packet, enqueued by tcp. Feel free to propose better solution. --ANK (980728) */ if (np->rxopt.all) opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst = sk->sk_rx_dst; sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || dst->ops->check(dst, np->rx_dst_cookie) == NULL) { dst_release(dst); sk->sk_rx_dst = NULL; } } tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); if (opt_skb) goto ipv6_pktoptions; return 0; } if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { struct sock *nsk = tcp_v6_cookie_check(sk, skb); if (!nsk) goto discard; if (nsk != sk) { sock_rps_save_rxhash(nsk, skb); sk_mark_napi_id(nsk, skb); if (tcp_child_process(sk, nsk, skb)) goto reset; if (opt_skb) __kfree_skb(opt_skb); return 0; } } else sock_rps_save_rxhash(sk, skb); if (tcp_rcv_state_process(sk, skb)) goto reset; if (opt_skb) goto ipv6_pktoptions; return 0; reset: tcp_v6_send_reset(sk, skb); discard: if (opt_skb) __kfree_skb(opt_skb); kfree_skb(skb); return 0; csum_err: TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; ipv6_pktoptions: /* Do you ask, what is it? 1. skb was enqueued by tcp. 2. skb is added to tail of read queue, rather than out of order. 3. socket is not in passive state. 4. Finally, it really contains options, which user wants to receive. */ tp = tcp_sk(sk); if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt && !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo) np->mcast_oif = tcp_v6_iif(opt_skb); if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass) np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb)); if (np->repflow) np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { skb_set_owner_r(opt_skb, sk); tcp_v6_restore_cb(opt_skb); opt_skb = xchg(&np->pktoptions, opt_skb); } else { __kfree_skb(opt_skb); opt_skb = xchg(&np->pktoptions, NULL); } } kfree_skb(opt_skb); return 0; } Commit Message: tcp: take care of truncations done by sk_filter() With syzkaller help, Marco Grassi found a bug in TCP stack, crashing in tcp_collapse() Root cause is that sk_filter() can truncate the incoming skb, but TCP stack was not really expecting this to happen. 
It probably was expecting a simple DROP or ACCEPT behavior. We first need to make sure no part of TCP header could be removed. Then we need to adjust TCP_SKB_CB(skb)->end_seq Many thanks to syzkaller team and Marco for giving us a reproducer. Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Marco Grassi <[email protected]> Reported-by: Vladis Dronov <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-284
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct tcp_sock *tp; struct sk_buff *opt_skb = NULL; /* Imagine: socket is IPv6. IPv4 packet arrives, goes to IPv4 receive handler and backlogged. From backlog it always goes here. Kerboom... Fortunately, tcp_rcv_established and rcv_established handle them correctly, but it is not case with tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK */ if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_do_rcv(sk, skb); if (tcp_filter(sk, skb)) goto discard; /* * socket locking is here for SMP purposes as backlog rcv * is currently called with bh processing disabled. */ /* Do Stevens' IPV6_PKTOPTIONS. Yes, guys, it is the only place in our code, where we may make it not affecting IPv4. The rest of code is protocol independent, and I do not like idea to uglify IPv4. Actually, all the idea behind IPV6_PKTOPTIONS looks not very well thought. For now we latch options, received in the last packet, enqueued by tcp. Feel free to propose better solution. --ANK (980728) */ if (np->rxopt.all) opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst = sk->sk_rx_dst; sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || dst->ops->check(dst, np->rx_dst_cookie) == NULL) { dst_release(dst); sk->sk_rx_dst = NULL; } } tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); if (opt_skb) goto ipv6_pktoptions; return 0; } if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { struct sock *nsk = tcp_v6_cookie_check(sk, skb); if (!nsk) goto discard; if (nsk != sk) { sock_rps_save_rxhash(nsk, skb); sk_mark_napi_id(nsk, skb); if (tcp_child_process(sk, nsk, skb)) goto reset; if (opt_skb) __kfree_skb(opt_skb); return 0; } } else sock_rps_save_rxhash(sk, skb); if (tcp_rcv_state_process(sk, skb)) goto reset; if (opt_skb) goto ipv6_pktoptions; return 0; reset: tcp_v6_send_reset(sk, skb); discard: if (opt_skb) __kfree_skb(opt_skb); kfree_skb(skb); return 0; csum_err: TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; ipv6_pktoptions: /* Do you ask, what is it? 1. skb was enqueued by tcp. 2. skb is added to tail of read queue, rather than out of order. 3. socket is not in passive state. 4. Finally, it really contains options, which user wants to receive. */ tp = tcp_sk(sk); if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt && !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo) np->mcast_oif = tcp_v6_iif(opt_skb); if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass) np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb)); if (np->repflow) np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { skb_set_owner_r(opt_skb, sk); tcp_v6_restore_cb(opt_skb); opt_skb = xchg(&np->pktoptions, opt_skb); } else { __kfree_skb(opt_skb); opt_skb = xchg(&np->pktoptions, NULL); } } kfree_skb(opt_skb); return 0; }
166,914
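The tcp_v6_do_rcv change above swaps sk_filter() for tcp_filter(); per the commit message, the point is that a socket filter may shrink the skb after TCP has already recorded end_seq from the original length, so the header must be preserved and the bookkeeping re-derived afterwards. The sketch below illustrates that idea with made-up types and function names; it is not the kernel implementation.

#include <stddef.h>

struct pkt {
	unsigned char *data;
	size_t         len;      /* may shrink when a filter trims the packet */
	unsigned int   seq;      /* sequence number of data[0]                */
	unsigned int   end_seq;  /* seq + payload length                      */
};

/* Hypothetical filter: may reduce p->len; returns non-zero to drop. */
typedef int (*pkt_filter_fn)(struct pkt *p, size_t hdr_len);

int run_filter_then_fix_bookkeeping(struct pkt *p, size_t hdr_len,
				    pkt_filter_fn filter)
{
	if (p->len < hdr_len)
		return -1;               /* malformed before filtering     */
	if (filter(p, hdr_len) != 0)
		return -1;               /* filter asked us to drop it     */
	if (p->len < hdr_len)
		return -1;               /* filter must not eat the header */

	/* Re-derive end_seq from the post-filter length; keeping the
	   pre-filter value is what later let tcp_collapse() walk past
	   the shortened buffer in the reported crash. */
	p->end_seq = p->seq + (unsigned int)(p->len - hdr_len);
	return 0;
}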
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int InternalPageInfoBubbleView::GetDialogButtons() const {
  return ui::DIALOG_BUTTON_NONE;
}

Commit Message: Desktop Page Info/Harmony: Show close button for internal pages.

The Harmony version of Page Info for internal Chrome pages (chrome://,
chrome-extension:// and view-source:// pages) show a close button. Update
the code to match this.

This patch also adds TestBrowserDialog tests for the latter two cases
described above (internal extension and view source pages).

See screenshot - https://drive.google.com/file/d/18RZnMiHCu-rCX9N6DLUpu4mkFWguh1xm/view?usp=sharing

Bug: 535074
Change-Id: I55e5f1aa682fd4ec85f7b65ac88f5a4f5906fe53
Reviewed-on: https://chromium-review.googlesource.com/759624
Commit-Queue: Patti <[email protected]>
Reviewed-by: Trent Apted <[email protected]>
Cr-Commit-Position: refs/heads/master@{#516624}
CWE ID: CWE-704

int InternalPageInfoBubbleView::GetDialogButtons() const {
172,297
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: Mac_Read_POST_Resource( FT_Library library, FT_Stream stream, FT_Long *offsets, FT_Long resource_cnt, FT_Long face_index, FT_Face *aface ) { FT_Error error = FT_Err_Cannot_Open_Resource; FT_Memory memory = library->memory; FT_Byte* pfb_data; int i, type, flags; FT_Long len; FT_Long pfb_len, pfb_pos, pfb_lenpos; FT_Long rlen, temp; if ( face_index == -1 ) face_index = 0; if ( face_index != 0 ) return error; /* Find the length of all the POST resources, concatenated. Assume */ /* worst case (each resource in its own section). */ pfb_len = 0; for ( i = 0; i < resource_cnt; ++i ) { error = FT_Stream_Seek( stream, offsets[i] ); if ( error ) goto Exit; if ( FT_READ_LONG( temp ) ) goto Exit; pfb_len += temp + 6; } if ( FT_ALLOC( pfb_data, (FT_Long)pfb_len + 2 ) ) goto Exit; pfb_data[0] = 0x80; pfb_data[1] = 1; /* Ascii section */ pfb_data[2] = 0; /* 4-byte length, fill in later */ pfb_data[3] = 0; pfb_data[4] = 0; pfb_data[5] = 0; pfb_pos = 6; pfb_lenpos = 2; len = 0; type = 1; for ( i = 0; i < resource_cnt; ++i ) { error = FT_Stream_Seek( stream, offsets[i] ); if ( error ) goto Exit2; if ( FT_READ_LONG( rlen ) ) goto Exit; if ( FT_READ_USHORT( flags ) ) goto Exit; rlen -= 2; /* the flags are part of the resource */ if ( ( flags >> 8 ) == type ) len += rlen; else { pfb_data[pfb_lenpos ] = (FT_Byte)( len ); pfb_data[pfb_lenpos + 1] = (FT_Byte)( len >> 8 ); pfb_data[pfb_lenpos + 2] = (FT_Byte)( len >> 16 ); pfb_data[pfb_lenpos + 3] = (FT_Byte)( len >> 24 ); if ( ( flags >> 8 ) == 5 ) /* End of font mark */ break; pfb_data[pfb_pos++] = 0x80; type = flags >> 8; len = rlen; pfb_data[pfb_pos++] = (FT_Byte)type; pfb_lenpos = pfb_pos; pfb_data[pfb_pos++] = 0; /* 4-byte length, fill in later */ pfb_data[pfb_pos++] = 0; pfb_data[pfb_pos++] = 0; pfb_data[pfb_pos++] = 0; } error = FT_Stream_Read( stream, (FT_Byte *)pfb_data + pfb_pos, rlen ); pfb_pos += rlen; } pfb_data[pfb_lenpos ] = (FT_Byte)( len ); pfb_data[pfb_lenpos + 1] = (FT_Byte)( len >> 8 ); pfb_data[pfb_lenpos + 2] = (FT_Byte)( len >> 16 ); pfb_data[pfb_lenpos + 3] = (FT_Byte)( len >> 24 ); return open_face_from_buffer( library, pfb_data, pfb_pos, face_index, "type1", aface ); Exit2: FT_FREE( pfb_data ); Exit: return error; } Commit Message: CWE ID: CWE-119
Mac_Read_POST_Resource( FT_Library library, FT_Stream stream, FT_Long *offsets, FT_Long resource_cnt, FT_Long face_index, FT_Face *aface ) { FT_Error error = FT_Err_Cannot_Open_Resource; FT_Memory memory = library->memory; FT_Byte* pfb_data; int i, type, flags; FT_Long len; FT_Long pfb_len, pfb_pos, pfb_lenpos; FT_Long rlen, temp; if ( face_index == -1 ) face_index = 0; if ( face_index != 0 ) return error; /* Find the length of all the POST resources, concatenated. Assume */ /* worst case (each resource in its own section). */ pfb_len = 0; for ( i = 0; i < resource_cnt; ++i ) { error = FT_Stream_Seek( stream, offsets[i] ); if ( error ) goto Exit; if ( FT_READ_LONG( temp ) ) goto Exit; pfb_len += temp + 6; } if ( FT_ALLOC( pfb_data, (FT_Long)pfb_len + 2 ) ) goto Exit; pfb_data[0] = 0x80; pfb_data[1] = 1; /* Ascii section */ pfb_data[2] = 0; /* 4-byte length, fill in later */ pfb_data[3] = 0; pfb_data[4] = 0; pfb_data[5] = 0; pfb_pos = 6; pfb_lenpos = 2; len = 0; type = 1; for ( i = 0; i < resource_cnt; ++i ) { error = FT_Stream_Seek( stream, offsets[i] ); if ( error ) goto Exit2; if ( FT_READ_LONG( rlen ) ) goto Exit; if ( FT_READ_USHORT( flags ) ) goto Exit; rlen -= 2; /* the flags are part of the resource */ if ( ( flags >> 8 ) == type ) len += rlen; else { pfb_data[pfb_lenpos ] = (FT_Byte)( len ); pfb_data[pfb_lenpos + 1] = (FT_Byte)( len >> 8 ); pfb_data[pfb_lenpos + 2] = (FT_Byte)( len >> 16 ); pfb_data[pfb_lenpos + 3] = (FT_Byte)( len >> 24 ); if ( ( flags >> 8 ) == 5 ) /* End of font mark */ break; pfb_data[pfb_pos++] = 0x80; type = flags >> 8; len = rlen; pfb_data[pfb_pos++] = (FT_Byte)type; pfb_lenpos = pfb_pos; pfb_data[pfb_pos++] = 0; /* 4-byte length, fill in later */ pfb_data[pfb_pos++] = 0; pfb_data[pfb_pos++] = 0; pfb_data[pfb_pos++] = 0; } error = FT_Stream_Read( stream, (FT_Byte *)pfb_data + pfb_pos, rlen ); if ( error ) goto Exit2; pfb_pos += rlen; } pfb_data[pfb_lenpos ] = (FT_Byte)( len ); pfb_data[pfb_lenpos + 1] = (FT_Byte)( len >> 8 ); pfb_data[pfb_lenpos + 2] = (FT_Byte)( len >> 16 ); pfb_data[pfb_lenpos + 3] = (FT_Byte)( len >> 24 ); return open_face_from_buffer( library, pfb_data, pfb_pos, face_index, "type1", aface ); Exit2: FT_FREE( pfb_data ); Exit: return error; }
165,006
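The FreeType fix above adds an error check after FT_Stream_Read() so that pfb_pos is only advanced when the read actually succeeded, rather than marching past uninitialized bytes after a short or failed read. Reduced to a generic helper with hypothetical names (read_fn and append_chunk are not FreeType API), the guard looks like this:

#include <stddef.h>

/* Hypothetical reader: fills exactly `want` bytes and returns 0, or
   returns non-zero on any error or short read. */
typedef int (*read_fn)( void *ctx, unsigned char *dst, size_t want );

int append_chunk( void *ctx, read_fn read_bytes,
                  unsigned char *out, size_t out_cap,
                  size_t *out_pos, size_t chunk_len )
{
  if ( *out_pos > out_cap || chunk_len > out_cap - *out_pos )
    return -1;              /* chunk would not fit in the output buffer   */

  if ( read_bytes( ctx, out + *out_pos, chunk_len ) != 0 )
    return -1;              /* the added guard: bail out on a failed read */

  *out_pos += chunk_len;    /* only advance past bytes actually filled in */
  return 0;
}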
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void Register(const GURL& url,
                          const base::FilePath& root_http,
                          ReportResponseHeadersOnUI report_on_ui) {
  EXPECT_TRUE(
      content::BrowserThread::CurrentlyOn(content::BrowserThread::IO));
  base::FilePath file_path(root_http);
  file_path =
      file_path.AppendASCII(url.scheme() + "." + url.host() + ".html");
  net::URLRequestFilter::GetInstance()->AddUrlInterceptor(
      url, base::WrapUnique(
               new MirrorMockJobInterceptor(file_path, report_on_ui)));
}

Commit Message: Fix ChromeResourceDispatcherHostDelegateMirrorBrowserTest.MirrorRequestHeader with network service.

The functionality worked, as part of converting DICE, however the test code
didn't work since it depended on accessing the net objects directly. Switch
the tests to use the EmbeddedTestServer, to better match production, which
removes the dependency on net/.

Also:
-make GetFilePathWithReplacements replace strings in the mock headers if
 they're present
-add a global to google_util to ignore ports; that way other tests can be
 converted without having to modify each callsite to google_util

Bug: 881976
Change-Id: Ic52023495c1c98c1248025c11cdf37f433fef058
Reviewed-on: https://chromium-review.googlesource.com/c/1328142
Commit-Queue: John Abd-El-Malek <[email protected]>
Reviewed-by: Ramin Halavati <[email protected]>
Reviewed-by: Maks Orlovich <[email protected]>
Reviewed-by: Peter Kasting <[email protected]>
Cr-Commit-Position: refs/heads/master@{#607652}
CWE ID:
static void Register(const GURL& url,
172,579
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: PHP_FUNCTION(locale_compose) { smart_str loc_name_s = {0}; smart_str *loc_name = &loc_name_s; zval* arr = NULL; HashTable* hash_arr = NULL; int result = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "a", &arr) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } hash_arr = HASH_OF( arr ); if( !hash_arr || zend_hash_num_elements( hash_arr ) == 0 ) RETURN_FALSE; /* Check for grandfathered first */ result = append_key_value(loc_name, hash_arr, LOC_GRANDFATHERED_LANG_TAG); if( result == SUCCESS){ RETURN_SMART_STR(loc_name); } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Not grandfathered */ result = append_key_value(loc_name, hash_arr , LOC_LANG_TAG); if( result == LOC_NOT_FOUND ){ intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: parameter array does not contain 'language' tag.", 0 TSRMLS_CC ); smart_str_free(loc_name); RETURN_FALSE; } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Extlang */ result = append_multiple_key_values(loc_name, hash_arr , LOC_EXTLANG_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Script */ result = append_key_value(loc_name, hash_arr , LOC_SCRIPT_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Region */ result = append_key_value( loc_name, hash_arr , LOC_REGION_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Variant */ result = append_multiple_key_values( loc_name, hash_arr , LOC_VARIANT_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Private */ result = append_multiple_key_values( loc_name, hash_arr , LOC_PRIVATE_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } RETURN_SMART_STR(loc_name); } Commit Message: Fix bug #72241: get_icu_value_internal out-of-bounds read CWE ID: CWE-125
PHP_FUNCTION(locale_compose) { smart_str loc_name_s = {0}; smart_str *loc_name = &loc_name_s; zval* arr = NULL; HashTable* hash_arr = NULL; int result = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "a", &arr) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } hash_arr = HASH_OF( arr ); if( !hash_arr || zend_hash_num_elements( hash_arr ) == 0 ) RETURN_FALSE; /* Check for grandfathered first */ result = append_key_value(loc_name, hash_arr, LOC_GRANDFATHERED_LANG_TAG); if( result == SUCCESS){ RETURN_SMART_STR(loc_name); } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Not grandfathered */ result = append_key_value(loc_name, hash_arr , LOC_LANG_TAG); if( result == LOC_NOT_FOUND ){ intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: parameter array does not contain 'language' tag.", 0 TSRMLS_CC ); smart_str_free(loc_name); RETURN_FALSE; } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Extlang */ result = append_multiple_key_values(loc_name, hash_arr , LOC_EXTLANG_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Script */ result = append_key_value(loc_name, hash_arr , LOC_SCRIPT_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Region */ result = append_key_value( loc_name, hash_arr , LOC_REGION_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Variant */ result = append_multiple_key_values( loc_name, hash_arr , LOC_VARIANT_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Private */ result = append_multiple_key_values( loc_name, hash_arr , LOC_PRIVATE_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } RETURN_SMART_STR(loc_name); }
167,191
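The locale_compose record above is tagged CWE-125, but the listed function body appears unchanged between the vulnerable and fixed versions; the commit title points at get_icu_value_internal(), which these INTL locale functions call into, as the site of the out-of-bounds read. As a generic, hypothetical illustration of the kind of bounded-copy guard that closes that class of bug (not the actual PHP patch, and none of these names exist in the PHP sources):

#include <string.h>

/* Hypothetical bounded lookup: copies a locale sub-tag only when it
   fits, never scanning or copying past the destination capacity. */
int get_tag_value( const char *src, char *dst, size_t dst_cap )
{
	size_t n = 0;

	if ( src == NULL || dst == NULL || dst_cap == 0 )
		return -1;
	while ( n < dst_cap && src[n] != '\0' )
		n++;                        /* bounded scan, never past dst_cap */
	if ( n == dst_cap )
		return -1;                  /* unterminated or too long: refuse */
	memcpy( dst, src, n + 1 );          /* copy includes the trailing NUL   */
	return 0;
}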
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: WORD32 ih264d_start_of_pic(dec_struct_t *ps_dec, WORD32 i4_poc, pocstruct_t *ps_temp_poc, UWORD16 u2_frame_num, dec_pic_params_t *ps_pps) { pocstruct_t *ps_prev_poc = &ps_dec->s_cur_pic_poc; pocstruct_t *ps_cur_poc = ps_temp_poc; pic_buffer_t *pic_buf; ivd_video_decode_op_t * ps_dec_output = (ivd_video_decode_op_t *)ps_dec->pv_dec_out; dec_slice_params_t *ps_cur_slice = ps_dec->ps_cur_slice; dec_seq_params_t *ps_seq = ps_pps->ps_sps; UWORD8 u1_bottom_field_flag = ps_cur_slice->u1_bottom_field_flag; UWORD8 u1_field_pic_flag = ps_cur_slice->u1_field_pic_flag; /* high profile related declarations */ high_profile_tools_t s_high_profile; WORD32 ret; H264_MUTEX_LOCK(&ps_dec->process_disp_mutex); ps_prev_poc->i4_pic_order_cnt_lsb = ps_cur_poc->i4_pic_order_cnt_lsb; ps_prev_poc->i4_pic_order_cnt_msb = ps_cur_poc->i4_pic_order_cnt_msb; ps_prev_poc->i4_delta_pic_order_cnt_bottom = ps_cur_poc->i4_delta_pic_order_cnt_bottom; ps_prev_poc->i4_delta_pic_order_cnt[0] = ps_cur_poc->i4_delta_pic_order_cnt[0]; ps_prev_poc->i4_delta_pic_order_cnt[1] = ps_cur_poc->i4_delta_pic_order_cnt[1]; ps_prev_poc->u1_bot_field = ps_dec->ps_cur_slice->u1_bottom_field_flag; ps_prev_poc->i4_prev_frame_num_ofst = ps_cur_poc->i4_prev_frame_num_ofst; ps_prev_poc->u2_frame_num = u2_frame_num; ps_dec->i1_prev_mb_qp_delta = 0; ps_dec->i1_next_ctxt_idx = 0; ps_dec->u4_nmb_deblk = 0; if(ps_dec->u4_num_cores == 1) ps_dec->u4_nmb_deblk = 1; if(ps_seq->u1_mb_aff_flag == 1) { ps_dec->u4_nmb_deblk = 0; if(ps_dec->u4_num_cores > 2) ps_dec->u4_num_cores = 2; } ps_dec->u4_use_intrapred_line_copy = 0; if (ps_seq->u1_mb_aff_flag == 0) { ps_dec->u4_use_intrapred_line_copy = 1; } ps_dec->u4_app_disable_deblk_frm = 0; /* If degrade is enabled, set the degrade flags appropriately */ if(ps_dec->i4_degrade_type && ps_dec->i4_degrade_pics) { WORD32 degrade_pic; ps_dec->i4_degrade_pic_cnt++; degrade_pic = 0; /* If degrade is to be done in all frames, then do not check further */ switch(ps_dec->i4_degrade_pics) { case 4: { degrade_pic = 1; break; } case 3: { if(ps_cur_slice->u1_slice_type != I_SLICE) degrade_pic = 1; break; } case 2: { /* If pic count hits non-degrade interval or it is an islice, then do not degrade */ if((ps_cur_slice->u1_slice_type != I_SLICE) && (ps_dec->i4_degrade_pic_cnt != ps_dec->i4_nondegrade_interval)) degrade_pic = 1; break; } case 1: { /* Check if the current picture is non-ref */ if(0 == ps_cur_slice->u1_nal_ref_idc) { degrade_pic = 1; } break; } } if(degrade_pic) { if(ps_dec->i4_degrade_type & 0x2) ps_dec->u4_app_disable_deblk_frm = 1; /* MC degrading is done only for non-ref pictures */ if(0 == ps_cur_slice->u1_nal_ref_idc) { if(ps_dec->i4_degrade_type & 0x4) ps_dec->i4_mv_frac_mask = 0; if(ps_dec->i4_degrade_type & 0x8) ps_dec->i4_mv_frac_mask = 0; } } else ps_dec->i4_degrade_pic_cnt = 0; } { dec_err_status_t * ps_err = ps_dec->ps_dec_err_status; if(ps_dec->u1_sl_typ_5_9 && ((ps_cur_slice->u1_slice_type == I_SLICE) || (ps_cur_slice->u1_slice_type == SI_SLICE))) ps_err->u1_cur_pic_type = PIC_TYPE_I; else ps_err->u1_cur_pic_type = PIC_TYPE_UNKNOWN; if(ps_err->u1_pic_aud_i == PIC_TYPE_I) { ps_err->u1_cur_pic_type = PIC_TYPE_I; ps_err->u1_pic_aud_i = PIC_TYPE_UNKNOWN; } if(ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL) { if(ps_err->u1_err_flag) ih264d_reset_ref_bufs(ps_dec->ps_dpb_mgr); ps_err->u1_err_flag = ACCEPT_ALL_PICS; } } if(ps_dec->u1_init_dec_flag && ps_dec->s_prev_seq_params.u1_eoseq_pending) { /* Reset the decoder picture buffers */ WORD32 j; for(j = 0; j < MAX_DISP_BUFS_NEW; j++) { 
ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, j, BUF_MGR_REF); ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_mv_buf_mgr, ps_dec->au1_pic_buf_id_mv_buf_id_map[j], BUF_MGR_REF); ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, j, BUF_MGR_IO); } /* reset the decoder structure parameters related to buffer handling */ ps_dec->u1_second_field = 0; ps_dec->i4_cur_display_seq = 0; /********************************************************************/ /* indicate in the decoder output i4_status that some frames are being */ /* dropped, so that it resets timestamp and wait for a new sequence */ /********************************************************************/ ps_dec->s_prev_seq_params.u1_eoseq_pending = 0; } ret = ih264d_init_pic(ps_dec, u2_frame_num, i4_poc, ps_pps); if(ret != OK) return ret; ps_dec->pv_parse_tu_coeff_data = ps_dec->pv_pic_tu_coeff_data; ps_dec->pv_proc_tu_coeff_data = ps_dec->pv_pic_tu_coeff_data; ps_dec->ps_nmb_info = ps_dec->ps_frm_mb_info; if(ps_dec->u1_separate_parse) { UWORD16 pic_wd = ps_dec->u4_width_at_init; UWORD16 pic_ht = ps_dec->u4_height_at_init; UWORD32 num_mbs; if((NULL != ps_dec->ps_cur_sps) && (1 == (ps_dec->ps_cur_sps->u1_is_valid))) { pic_wd = ps_dec->u2_pic_wd; pic_ht = ps_dec->u2_pic_ht; } num_mbs = (pic_wd * pic_ht) >> 8; if(ps_dec->pu1_dec_mb_map) { memset((void *)ps_dec->pu1_dec_mb_map, 0, num_mbs); } if(ps_dec->pu1_recon_mb_map) { memset((void *)ps_dec->pu1_recon_mb_map, 0, num_mbs); } if(ps_dec->pu2_slice_num_map) { memset((void *)ps_dec->pu2_slice_num_map, 0, (num_mbs * sizeof(UWORD16))); } } ps_dec->ps_parse_cur_slice = &(ps_dec->ps_dec_slice_buf[0]); ps_dec->ps_decode_cur_slice = &(ps_dec->ps_dec_slice_buf[0]); ps_dec->ps_computebs_cur_slice = &(ps_dec->ps_dec_slice_buf[0]); ps_dec->u2_cur_slice_num = 0; /* Initialize all the HP toolsets to zero */ ps_dec->s_high_profile.u1_scaling_present = 0; ps_dec->s_high_profile.u1_transform8x8_present = 0; /* Get Next Free Picture */ if(1 == ps_dec->u4_share_disp_buf) { UWORD32 i; /* Free any buffer that is in the queue to be freed */ for(i = 0; i < MAX_DISP_BUFS_NEW; i++) { if(0 == ps_dec->u4_disp_buf_to_be_freed[i]) continue; ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, i, BUF_MGR_IO); ps_dec->u4_disp_buf_to_be_freed[i] = 0; ps_dec->u4_disp_buf_mapping[i] = 0; } } if(!(u1_field_pic_flag && 0 != ps_dec->u1_top_bottom_decoded)) //ps_dec->u1_second_field)) { pic_buffer_t *ps_cur_pic; WORD32 cur_pic_buf_id, cur_mv_buf_id; col_mv_buf_t *ps_col_mv; while(1) { ps_cur_pic = (pic_buffer_t *)ih264_buf_mgr_get_next_free( (buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &cur_pic_buf_id); if(ps_cur_pic == NULL) { ps_dec->i4_error_code = ERROR_UNAVAIL_PICBUF_T; return ERROR_UNAVAIL_PICBUF_T; } if(0 == ps_dec->u4_disp_buf_mapping[cur_pic_buf_id]) { break; } } ps_col_mv = (col_mv_buf_t *)ih264_buf_mgr_get_next_free((buf_mgr_t *)ps_dec->pv_mv_buf_mgr, &cur_mv_buf_id); if(ps_col_mv == NULL) { ps_dec->i4_error_code = ERROR_UNAVAIL_MVBUF_T; return ERROR_UNAVAIL_MVBUF_T; } ps_dec->ps_cur_pic = ps_cur_pic; ps_dec->u1_pic_buf_id = cur_pic_buf_id; ps_cur_pic->u4_ts = ps_dec->u4_ts; ps_cur_pic->u1_mv_buf_id = cur_mv_buf_id; ps_dec->au1_pic_buf_id_mv_buf_id_map[cur_pic_buf_id] = cur_mv_buf_id; ps_cur_pic->pu1_col_zero_flag = (UWORD8 *)ps_col_mv->pv_col_zero_flag; ps_cur_pic->ps_mv = (mv_pred_t *)ps_col_mv->pv_mv; ps_dec->au1_pic_buf_ref_flag[cur_pic_buf_id] = 0; if(ps_dec->u1_first_slice_in_stream) { /*make first entry of list0 point to cur pic,so that if first Islice is in error, ref pic struct will have 
valid entries*/ ps_dec->ps_ref_pic_buf_lx[0] = ps_dec->ps_dpb_mgr->ps_init_dpb[0]; *(ps_dec->ps_dpb_mgr->ps_init_dpb[0][0]) = *ps_cur_pic; /* Initialize for field reference as well */ *(ps_dec->ps_dpb_mgr->ps_init_dpb[0][MAX_REF_BUFS]) = *ps_cur_pic; } if(!ps_dec->ps_cur_pic) { WORD32 j; H264_DEC_DEBUG_PRINT("------- Display Buffers Reset --------\n"); for(j = 0; j < MAX_DISP_BUFS_NEW; j++) { ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, j, BUF_MGR_REF); ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_mv_buf_mgr, ps_dec->au1_pic_buf_id_mv_buf_id_map[j], BUF_MGR_REF); ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, j, BUF_MGR_IO); } ps_dec->i4_cur_display_seq = 0; ps_dec->i4_prev_max_display_seq = 0; ps_dec->i4_max_poc = 0; ps_cur_pic = (pic_buffer_t *)ih264_buf_mgr_get_next_free( (buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &cur_pic_buf_id); if(ps_cur_pic == NULL) { ps_dec->i4_error_code = ERROR_UNAVAIL_PICBUF_T; return ERROR_UNAVAIL_PICBUF_T; } ps_col_mv = (col_mv_buf_t *)ih264_buf_mgr_get_next_free((buf_mgr_t *)ps_dec->pv_mv_buf_mgr, &cur_mv_buf_id); if(ps_col_mv == NULL) { ps_dec->i4_error_code = ERROR_UNAVAIL_MVBUF_T; return ERROR_UNAVAIL_MVBUF_T; } ps_dec->ps_cur_pic = ps_cur_pic; ps_dec->u1_pic_buf_id = cur_pic_buf_id; ps_cur_pic->u4_ts = ps_dec->u4_ts; ps_dec->apv_buf_id_pic_buf_map[cur_pic_buf_id] = (void *)ps_cur_pic; ps_cur_pic->u1_mv_buf_id = cur_mv_buf_id; ps_dec->au1_pic_buf_id_mv_buf_id_map[cur_pic_buf_id] = cur_mv_buf_id; ps_cur_pic->pu1_col_zero_flag = (UWORD8 *)ps_col_mv->pv_col_zero_flag; ps_cur_pic->ps_mv = (mv_pred_t *)ps_col_mv->pv_mv; ps_dec->au1_pic_buf_ref_flag[cur_pic_buf_id] = 0; } ps_dec->ps_cur_pic->u1_picturetype = u1_field_pic_flag; ps_dec->ps_cur_pic->u4_pack_slc_typ = SKIP_NONE; H264_DEC_DEBUG_PRINT("got a buffer\n"); } else { H264_DEC_DEBUG_PRINT("did not get a buffer\n"); } ps_dec->u4_pic_buf_got = 1; ps_dec->ps_cur_pic->i4_poc = i4_poc; ps_dec->ps_cur_pic->i4_frame_num = u2_frame_num; ps_dec->ps_cur_pic->i4_pic_num = u2_frame_num; ps_dec->ps_cur_pic->i4_top_field_order_cnt = ps_pps->i4_top_field_order_cnt; ps_dec->ps_cur_pic->i4_bottom_field_order_cnt = ps_pps->i4_bottom_field_order_cnt; ps_dec->ps_cur_pic->i4_avg_poc = ps_pps->i4_avg_poc; ps_dec->ps_cur_pic->u4_time_stamp = ps_dec->u4_pts; ps_dec->s_cur_pic = *(ps_dec->ps_cur_pic); if(u1_field_pic_flag && u1_bottom_field_flag) { WORD32 i4_temp_poc; WORD32 i4_top_field_order_poc, i4_bot_field_order_poc; /* Point to odd lines, since it's bottom field */ ps_dec->s_cur_pic.pu1_buf1 += ps_dec->s_cur_pic.u2_frm_wd_y; ps_dec->s_cur_pic.pu1_buf2 += ps_dec->s_cur_pic.u2_frm_wd_uv; ps_dec->s_cur_pic.pu1_buf3 += ps_dec->s_cur_pic.u2_frm_wd_uv; ps_dec->s_cur_pic.ps_mv += ((ps_dec->u2_pic_ht * ps_dec->u2_pic_wd) >> 5); ps_dec->s_cur_pic.pu1_col_zero_flag += ((ps_dec->u2_pic_ht * ps_dec->u2_pic_wd) >> 5); ps_dec->ps_cur_pic->u1_picturetype |= BOT_FLD; i4_top_field_order_poc = ps_dec->ps_cur_pic->i4_top_field_order_cnt; i4_bot_field_order_poc = ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; i4_temp_poc = MIN(i4_top_field_order_poc, i4_bot_field_order_poc); ps_dec->ps_cur_pic->i4_avg_poc = i4_temp_poc; } ps_cur_slice->u1_mbaff_frame_flag = ps_seq->u1_mb_aff_flag && (!u1_field_pic_flag); ps_dec->ps_cur_pic->u1_picturetype |= (ps_cur_slice->u1_mbaff_frame_flag << 2); ps_dec->ps_cur_mb_row = ps_dec->ps_nbr_mb_row; //[0]; ps_dec->ps_cur_mb_row++; //Increment by 1 ,so that left mb will always be valid ps_dec->ps_top_mb_row = ps_dec->ps_nbr_mb_row + ((ps_dec->u2_frm_wd_in_mbs + 1) << (1 - 
ps_dec->ps_cur_sps->u1_frame_mbs_only_flag)); ps_dec->ps_top_mb_row++; //Increment by 1 ,so that left mb will always be valid ps_dec->pu1_y = ps_dec->pu1_y_scratch[0]; ps_dec->pu1_u = ps_dec->pu1_u_scratch[0]; ps_dec->pu1_v = ps_dec->pu1_v_scratch[0]; ps_dec->u1_yuv_scratch_idx = 0; /* CHANGED CODE */ ps_dec->ps_mv_cur = ps_dec->s_cur_pic.ps_mv; ps_dec->ps_mv_top = ps_dec->ps_mv_top_p[0]; /* CHANGED CODE */ ps_dec->u1_mv_top_p = 0; ps_dec->u1_mb_idx = 0; /* CHANGED CODE */ ps_dec->ps_mv_left = ps_dec->s_cur_pic.ps_mv; ps_dec->pu1_yleft = 0; ps_dec->pu1_uleft = 0; ps_dec->pu1_vleft = 0; ps_dec->u1_not_wait_rec = 2; ps_dec->u2_total_mbs_coded = 0; ps_dec->i4_submb_ofst = -(SUB_BLK_SIZE); ps_dec->u4_pred_info_idx = 0; ps_dec->u4_pred_info_pkd_idx = 0; ps_dec->u4_dma_buf_idx = 0; ps_dec->ps_mv = ps_dec->s_cur_pic.ps_mv; ps_dec->ps_mv_bank_cur = ps_dec->s_cur_pic.ps_mv; ps_dec->pu1_col_zero_flag = ps_dec->s_cur_pic.pu1_col_zero_flag; ps_dec->ps_part = ps_dec->ps_parse_part_params; ps_dec->i2_prev_slice_mbx = -1; ps_dec->i2_prev_slice_mby = 0; ps_dec->u2_mv_2mb[0] = 0; ps_dec->u2_mv_2mb[1] = 0; ps_dec->u1_last_pic_not_decoded = 0; ps_dec->u2_cur_slice_num_dec_thread = 0; ps_dec->u2_cur_slice_num_bs = 0; ps_dec->u4_intra_pred_line_ofst = 0; ps_dec->pu1_cur_y_intra_pred_line = ps_dec->pu1_y_intra_pred_line; ps_dec->pu1_cur_u_intra_pred_line = ps_dec->pu1_u_intra_pred_line; ps_dec->pu1_cur_v_intra_pred_line = ps_dec->pu1_v_intra_pred_line; ps_dec->pu1_cur_y_intra_pred_line_base = ps_dec->pu1_y_intra_pred_line; ps_dec->pu1_cur_u_intra_pred_line_base = ps_dec->pu1_u_intra_pred_line; ps_dec->pu1_cur_v_intra_pred_line_base = ps_dec->pu1_v_intra_pred_line; ps_dec->pu1_prev_y_intra_pred_line = ps_dec->pu1_y_intra_pred_line + (ps_dec->u2_frm_wd_in_mbs * MB_SIZE); ps_dec->pu1_prev_u_intra_pred_line = ps_dec->pu1_u_intra_pred_line + ps_dec->u2_frm_wd_in_mbs * BLK8x8SIZE * YUV420SP_FACTOR; ps_dec->pu1_prev_v_intra_pred_line = ps_dec->pu1_v_intra_pred_line + ps_dec->u2_frm_wd_in_mbs * BLK8x8SIZE; ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic; ps_dec->ps_deblk_mbn_curr = ps_dec->ps_deblk_mbn; ps_dec->ps_deblk_mbn_prev = ps_dec->ps_deblk_mbn + ps_dec->u1_recon_mb_grp; /* Initialize The Function Pointer Depending Upon the Entropy and MbAff Flag */ { if(ps_cur_slice->u1_mbaff_frame_flag) { ps_dec->pf_compute_bs = ih264d_compute_bs_mbaff; ps_dec->pf_mvpred = ih264d_mvpred_mbaff; } else { ps_dec->pf_compute_bs = ih264d_compute_bs_non_mbaff; ps_dec->u1_cur_mb_fld_dec_flag = ps_cur_slice->u1_field_pic_flag; } } /* Set up the Parameter for DMA transfer */ { UWORD8 u1_field_pic_flag = ps_dec->ps_cur_slice->u1_field_pic_flag; UWORD8 u1_mbaff = ps_cur_slice->u1_mbaff_frame_flag; UWORD8 uc_lastmbs = (((ps_dec->u2_pic_wd) >> 4) % (ps_dec->u1_recon_mb_grp >> u1_mbaff)); UWORD16 ui16_lastmbs_widthY = (uc_lastmbs ? (uc_lastmbs << 4) : ((ps_dec->u1_recon_mb_grp >> u1_mbaff) << 4)); UWORD16 ui16_lastmbs_widthUV = uc_lastmbs ? 
(uc_lastmbs << 3) : ((ps_dec->u1_recon_mb_grp >> u1_mbaff) << 3); ps_dec->s_tran_addrecon.pu1_dest_y = ps_dec->s_cur_pic.pu1_buf1; ps_dec->s_tran_addrecon.pu1_dest_u = ps_dec->s_cur_pic.pu1_buf2; ps_dec->s_tran_addrecon.pu1_dest_v = ps_dec->s_cur_pic.pu1_buf3; ps_dec->s_tran_addrecon.u2_frm_wd_y = ps_dec->u2_frm_wd_y << u1_field_pic_flag; ps_dec->s_tran_addrecon.u2_frm_wd_uv = ps_dec->u2_frm_wd_uv << u1_field_pic_flag; if(u1_field_pic_flag) { ui16_lastmbs_widthY += ps_dec->u2_frm_wd_y; ui16_lastmbs_widthUV += ps_dec->u2_frm_wd_uv; } /* Normal Increment of Pointer */ ps_dec->s_tran_addrecon.u4_inc_y[0] = ((ps_dec->u1_recon_mb_grp << 4) >> u1_mbaff); ps_dec->s_tran_addrecon.u4_inc_uv[0] = ((ps_dec->u1_recon_mb_grp << 4) >> u1_mbaff); /* End of Row Increment */ ps_dec->s_tran_addrecon.u4_inc_y[1] = (ui16_lastmbs_widthY + (PAD_LEN_Y_H << 1) + ps_dec->s_tran_addrecon.u2_frm_wd_y * ((15 << u1_mbaff) + u1_mbaff)); ps_dec->s_tran_addrecon.u4_inc_uv[1] = (ui16_lastmbs_widthUV + (PAD_LEN_UV_H << 2) + ps_dec->s_tran_addrecon.u2_frm_wd_uv * ((15 << u1_mbaff) + u1_mbaff)); /* Assign picture numbers to each frame/field */ /* only once per picture. */ ih264d_assign_pic_num(ps_dec); ps_dec->s_tran_addrecon.u2_mv_top_left_inc = (ps_dec->u1_recon_mb_grp << 2) - 1 - (u1_mbaff << 2); ps_dec->s_tran_addrecon.u2_mv_left_inc = ((ps_dec->u1_recon_mb_grp >> u1_mbaff) - 1) << (4 + u1_mbaff); } /**********************************************************************/ /* High profile related initialization at pictrue level */ /**********************************************************************/ if(ps_seq->u1_profile_idc == HIGH_PROFILE_IDC) { if((ps_seq->i4_seq_scaling_matrix_present_flag) || (ps_pps->i4_pic_scaling_matrix_present_flag)) { ih264d_form_scaling_matrix_picture(ps_seq, ps_pps, ps_dec); ps_dec->s_high_profile.u1_scaling_present = 1; } else { ih264d_form_default_scaling_matrix(ps_dec); } if(ps_pps->i4_transform_8x8_mode_flag) { ps_dec->s_high_profile.u1_transform8x8_present = 1; } } else { ih264d_form_default_scaling_matrix(ps_dec); } /* required while reading the transform_size_8x8 u4_flag */ ps_dec->s_high_profile.u1_direct_8x8_inference_flag = ps_seq->u1_direct_8x8_inference_flag; ps_dec->s_high_profile.s_cavlc_ctxt = ps_dec->s_cavlc_ctxt; ps_dec->i1_recon_in_thread3_flag = 1; ps_dec->ps_frame_buf_ip_recon = &ps_dec->s_tran_addrecon; if(ps_dec->u1_separate_parse) { memcpy(&ps_dec->s_tran_addrecon_parse, &ps_dec->s_tran_addrecon, sizeof(tfr_ctxt_t)); if(ps_dec->u4_num_cores >= 3 && ps_dec->i1_recon_in_thread3_flag) { memcpy(&ps_dec->s_tran_iprecon, &ps_dec->s_tran_addrecon, sizeof(tfr_ctxt_t)); ps_dec->ps_frame_buf_ip_recon = &ps_dec->s_tran_iprecon; } } ih264d_init_deblk_tfr_ctxt(ps_dec,&(ps_dec->s_pad_mgr), &(ps_dec->s_tran_addrecon), ps_dec->u2_frm_wd_in_mbs, 0); ps_dec->ps_cur_deblk_mb = ps_dec->ps_deblk_pic; ps_dec->u4_cur_deblk_mb_num = 0; ps_dec->u4_deblk_mb_x = 0; ps_dec->u4_deblk_mb_y = 0; ps_dec->pu4_wt_ofsts = ps_dec->pu4_wts_ofsts_mat; H264_MUTEX_UNLOCK(&ps_dec->process_disp_mutex); return OK; } Commit Message: Decoder: Initialize default reference buffers for all pictures Reference buffer is now initialized to default value for each pic before decoding the first slice in the pic Bug: 34097866 Change-Id: Id64b123af2188217ce833f11db0e6c0681d41dfd CWE ID: CWE-119
WORD32 ih264d_start_of_pic(dec_struct_t *ps_dec, WORD32 i4_poc, pocstruct_t *ps_temp_poc, UWORD16 u2_frame_num, dec_pic_params_t *ps_pps) { pocstruct_t *ps_prev_poc = &ps_dec->s_cur_pic_poc; pocstruct_t *ps_cur_poc = ps_temp_poc; pic_buffer_t *pic_buf; ivd_video_decode_op_t * ps_dec_output = (ivd_video_decode_op_t *)ps_dec->pv_dec_out; dec_slice_params_t *ps_cur_slice = ps_dec->ps_cur_slice; dec_seq_params_t *ps_seq = ps_pps->ps_sps; UWORD8 u1_bottom_field_flag = ps_cur_slice->u1_bottom_field_flag; UWORD8 u1_field_pic_flag = ps_cur_slice->u1_field_pic_flag; /* high profile related declarations */ high_profile_tools_t s_high_profile; WORD32 ret; H264_MUTEX_LOCK(&ps_dec->process_disp_mutex); ps_prev_poc->i4_pic_order_cnt_lsb = ps_cur_poc->i4_pic_order_cnt_lsb; ps_prev_poc->i4_pic_order_cnt_msb = ps_cur_poc->i4_pic_order_cnt_msb; ps_prev_poc->i4_delta_pic_order_cnt_bottom = ps_cur_poc->i4_delta_pic_order_cnt_bottom; ps_prev_poc->i4_delta_pic_order_cnt[0] = ps_cur_poc->i4_delta_pic_order_cnt[0]; ps_prev_poc->i4_delta_pic_order_cnt[1] = ps_cur_poc->i4_delta_pic_order_cnt[1]; ps_prev_poc->u1_bot_field = ps_dec->ps_cur_slice->u1_bottom_field_flag; ps_prev_poc->i4_prev_frame_num_ofst = ps_cur_poc->i4_prev_frame_num_ofst; ps_prev_poc->u2_frame_num = u2_frame_num; ps_dec->i1_prev_mb_qp_delta = 0; ps_dec->i1_next_ctxt_idx = 0; ps_dec->u4_nmb_deblk = 0; if(ps_dec->u4_num_cores == 1) ps_dec->u4_nmb_deblk = 1; if(ps_seq->u1_mb_aff_flag == 1) { ps_dec->u4_nmb_deblk = 0; if(ps_dec->u4_num_cores > 2) ps_dec->u4_num_cores = 2; } ps_dec->u4_use_intrapred_line_copy = 0; if (ps_seq->u1_mb_aff_flag == 0) { ps_dec->u4_use_intrapred_line_copy = 1; } ps_dec->u4_app_disable_deblk_frm = 0; /* If degrade is enabled, set the degrade flags appropriately */ if(ps_dec->i4_degrade_type && ps_dec->i4_degrade_pics) { WORD32 degrade_pic; ps_dec->i4_degrade_pic_cnt++; degrade_pic = 0; /* If degrade is to be done in all frames, then do not check further */ switch(ps_dec->i4_degrade_pics) { case 4: { degrade_pic = 1; break; } case 3: { if(ps_cur_slice->u1_slice_type != I_SLICE) degrade_pic = 1; break; } case 2: { /* If pic count hits non-degrade interval or it is an islice, then do not degrade */ if((ps_cur_slice->u1_slice_type != I_SLICE) && (ps_dec->i4_degrade_pic_cnt != ps_dec->i4_nondegrade_interval)) degrade_pic = 1; break; } case 1: { /* Check if the current picture is non-ref */ if(0 == ps_cur_slice->u1_nal_ref_idc) { degrade_pic = 1; } break; } } if(degrade_pic) { if(ps_dec->i4_degrade_type & 0x2) ps_dec->u4_app_disable_deblk_frm = 1; /* MC degrading is done only for non-ref pictures */ if(0 == ps_cur_slice->u1_nal_ref_idc) { if(ps_dec->i4_degrade_type & 0x4) ps_dec->i4_mv_frac_mask = 0; if(ps_dec->i4_degrade_type & 0x8) ps_dec->i4_mv_frac_mask = 0; } } else ps_dec->i4_degrade_pic_cnt = 0; } { dec_err_status_t * ps_err = ps_dec->ps_dec_err_status; if(ps_dec->u1_sl_typ_5_9 && ((ps_cur_slice->u1_slice_type == I_SLICE) || (ps_cur_slice->u1_slice_type == SI_SLICE))) ps_err->u1_cur_pic_type = PIC_TYPE_I; else ps_err->u1_cur_pic_type = PIC_TYPE_UNKNOWN; if(ps_err->u1_pic_aud_i == PIC_TYPE_I) { ps_err->u1_cur_pic_type = PIC_TYPE_I; ps_err->u1_pic_aud_i = PIC_TYPE_UNKNOWN; } if(ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL) { if(ps_err->u1_err_flag) ih264d_reset_ref_bufs(ps_dec->ps_dpb_mgr); ps_err->u1_err_flag = ACCEPT_ALL_PICS; } } if(ps_dec->u1_init_dec_flag && ps_dec->s_prev_seq_params.u1_eoseq_pending) { /* Reset the decoder picture buffers */ WORD32 j; for(j = 0; j < MAX_DISP_BUFS_NEW; j++) { 
ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, j, BUF_MGR_REF); ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_mv_buf_mgr, ps_dec->au1_pic_buf_id_mv_buf_id_map[j], BUF_MGR_REF); ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, j, BUF_MGR_IO); } /* reset the decoder structure parameters related to buffer handling */ ps_dec->u1_second_field = 0; ps_dec->i4_cur_display_seq = 0; /********************************************************************/ /* indicate in the decoder output i4_status that some frames are being */ /* dropped, so that it resets timestamp and wait for a new sequence */ /********************************************************************/ ps_dec->s_prev_seq_params.u1_eoseq_pending = 0; } ret = ih264d_init_pic(ps_dec, u2_frame_num, i4_poc, ps_pps); if(ret != OK) return ret; ps_dec->pv_parse_tu_coeff_data = ps_dec->pv_pic_tu_coeff_data; ps_dec->pv_proc_tu_coeff_data = ps_dec->pv_pic_tu_coeff_data; ps_dec->ps_nmb_info = ps_dec->ps_frm_mb_info; if(ps_dec->u1_separate_parse) { UWORD16 pic_wd = ps_dec->u4_width_at_init; UWORD16 pic_ht = ps_dec->u4_height_at_init; UWORD32 num_mbs; if((NULL != ps_dec->ps_cur_sps) && (1 == (ps_dec->ps_cur_sps->u1_is_valid))) { pic_wd = ps_dec->u2_pic_wd; pic_ht = ps_dec->u2_pic_ht; } num_mbs = (pic_wd * pic_ht) >> 8; if(ps_dec->pu1_dec_mb_map) { memset((void *)ps_dec->pu1_dec_mb_map, 0, num_mbs); } if(ps_dec->pu1_recon_mb_map) { memset((void *)ps_dec->pu1_recon_mb_map, 0, num_mbs); } if(ps_dec->pu2_slice_num_map) { memset((void *)ps_dec->pu2_slice_num_map, 0, (num_mbs * sizeof(UWORD16))); } } ps_dec->ps_parse_cur_slice = &(ps_dec->ps_dec_slice_buf[0]); ps_dec->ps_decode_cur_slice = &(ps_dec->ps_dec_slice_buf[0]); ps_dec->ps_computebs_cur_slice = &(ps_dec->ps_dec_slice_buf[0]); ps_dec->u2_cur_slice_num = 0; /* Initialize all the HP toolsets to zero */ ps_dec->s_high_profile.u1_scaling_present = 0; ps_dec->s_high_profile.u1_transform8x8_present = 0; /* Get Next Free Picture */ if(1 == ps_dec->u4_share_disp_buf) { UWORD32 i; /* Free any buffer that is in the queue to be freed */ for(i = 0; i < MAX_DISP_BUFS_NEW; i++) { if(0 == ps_dec->u4_disp_buf_to_be_freed[i]) continue; ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, i, BUF_MGR_IO); ps_dec->u4_disp_buf_to_be_freed[i] = 0; ps_dec->u4_disp_buf_mapping[i] = 0; } } if(!(u1_field_pic_flag && 0 != ps_dec->u1_top_bottom_decoded)) //ps_dec->u1_second_field)) { pic_buffer_t *ps_cur_pic; WORD32 cur_pic_buf_id, cur_mv_buf_id; col_mv_buf_t *ps_col_mv; while(1) { ps_cur_pic = (pic_buffer_t *)ih264_buf_mgr_get_next_free( (buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &cur_pic_buf_id); if(ps_cur_pic == NULL) { ps_dec->i4_error_code = ERROR_UNAVAIL_PICBUF_T; return ERROR_UNAVAIL_PICBUF_T; } if(0 == ps_dec->u4_disp_buf_mapping[cur_pic_buf_id]) { break; } } ps_col_mv = (col_mv_buf_t *)ih264_buf_mgr_get_next_free((buf_mgr_t *)ps_dec->pv_mv_buf_mgr, &cur_mv_buf_id); if(ps_col_mv == NULL) { ps_dec->i4_error_code = ERROR_UNAVAIL_MVBUF_T; return ERROR_UNAVAIL_MVBUF_T; } ps_dec->ps_cur_pic = ps_cur_pic; ps_dec->u1_pic_buf_id = cur_pic_buf_id; ps_cur_pic->u4_ts = ps_dec->u4_ts; ps_cur_pic->u1_mv_buf_id = cur_mv_buf_id; ps_dec->au1_pic_buf_id_mv_buf_id_map[cur_pic_buf_id] = cur_mv_buf_id; ps_cur_pic->pu1_col_zero_flag = (UWORD8 *)ps_col_mv->pv_col_zero_flag; ps_cur_pic->ps_mv = (mv_pred_t *)ps_col_mv->pv_mv; ps_dec->au1_pic_buf_ref_flag[cur_pic_buf_id] = 0; { /*make first entry of list0 point to cur pic,so that if first Islice is in error, ref pic struct will have valid entries*/ 
ps_dec->ps_ref_pic_buf_lx[0] = ps_dec->ps_dpb_mgr->ps_init_dpb[0]; *(ps_dec->ps_dpb_mgr->ps_init_dpb[0][0]) = *ps_cur_pic; /* Initialize for field reference as well */ *(ps_dec->ps_dpb_mgr->ps_init_dpb[0][MAX_REF_BUFS]) = *ps_cur_pic; } if(!ps_dec->ps_cur_pic) { WORD32 j; H264_DEC_DEBUG_PRINT("------- Display Buffers Reset --------\n"); for(j = 0; j < MAX_DISP_BUFS_NEW; j++) { ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, j, BUF_MGR_REF); ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_mv_buf_mgr, ps_dec->au1_pic_buf_id_mv_buf_id_map[j], BUF_MGR_REF); ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, j, BUF_MGR_IO); } ps_dec->i4_cur_display_seq = 0; ps_dec->i4_prev_max_display_seq = 0; ps_dec->i4_max_poc = 0; ps_cur_pic = (pic_buffer_t *)ih264_buf_mgr_get_next_free( (buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &cur_pic_buf_id); if(ps_cur_pic == NULL) { ps_dec->i4_error_code = ERROR_UNAVAIL_PICBUF_T; return ERROR_UNAVAIL_PICBUF_T; } ps_col_mv = (col_mv_buf_t *)ih264_buf_mgr_get_next_free((buf_mgr_t *)ps_dec->pv_mv_buf_mgr, &cur_mv_buf_id); if(ps_col_mv == NULL) { ps_dec->i4_error_code = ERROR_UNAVAIL_MVBUF_T; return ERROR_UNAVAIL_MVBUF_T; } ps_dec->ps_cur_pic = ps_cur_pic; ps_dec->u1_pic_buf_id = cur_pic_buf_id; ps_cur_pic->u4_ts = ps_dec->u4_ts; ps_dec->apv_buf_id_pic_buf_map[cur_pic_buf_id] = (void *)ps_cur_pic; ps_cur_pic->u1_mv_buf_id = cur_mv_buf_id; ps_dec->au1_pic_buf_id_mv_buf_id_map[cur_pic_buf_id] = cur_mv_buf_id; ps_cur_pic->pu1_col_zero_flag = (UWORD8 *)ps_col_mv->pv_col_zero_flag; ps_cur_pic->ps_mv = (mv_pred_t *)ps_col_mv->pv_mv; ps_dec->au1_pic_buf_ref_flag[cur_pic_buf_id] = 0; } ps_dec->ps_cur_pic->u1_picturetype = u1_field_pic_flag; ps_dec->ps_cur_pic->u4_pack_slc_typ = SKIP_NONE; H264_DEC_DEBUG_PRINT("got a buffer\n"); } else { H264_DEC_DEBUG_PRINT("did not get a buffer\n"); } ps_dec->u4_pic_buf_got = 1; ps_dec->ps_cur_pic->i4_poc = i4_poc; ps_dec->ps_cur_pic->i4_frame_num = u2_frame_num; ps_dec->ps_cur_pic->i4_pic_num = u2_frame_num; ps_dec->ps_cur_pic->i4_top_field_order_cnt = ps_pps->i4_top_field_order_cnt; ps_dec->ps_cur_pic->i4_bottom_field_order_cnt = ps_pps->i4_bottom_field_order_cnt; ps_dec->ps_cur_pic->i4_avg_poc = ps_pps->i4_avg_poc; ps_dec->ps_cur_pic->u4_time_stamp = ps_dec->u4_pts; ps_dec->s_cur_pic = *(ps_dec->ps_cur_pic); if(u1_field_pic_flag && u1_bottom_field_flag) { WORD32 i4_temp_poc; WORD32 i4_top_field_order_poc, i4_bot_field_order_poc; /* Point to odd lines, since it's bottom field */ ps_dec->s_cur_pic.pu1_buf1 += ps_dec->s_cur_pic.u2_frm_wd_y; ps_dec->s_cur_pic.pu1_buf2 += ps_dec->s_cur_pic.u2_frm_wd_uv; ps_dec->s_cur_pic.pu1_buf3 += ps_dec->s_cur_pic.u2_frm_wd_uv; ps_dec->s_cur_pic.ps_mv += ((ps_dec->u2_pic_ht * ps_dec->u2_pic_wd) >> 5); ps_dec->s_cur_pic.pu1_col_zero_flag += ((ps_dec->u2_pic_ht * ps_dec->u2_pic_wd) >> 5); ps_dec->ps_cur_pic->u1_picturetype |= BOT_FLD; i4_top_field_order_poc = ps_dec->ps_cur_pic->i4_top_field_order_cnt; i4_bot_field_order_poc = ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; i4_temp_poc = MIN(i4_top_field_order_poc, i4_bot_field_order_poc); ps_dec->ps_cur_pic->i4_avg_poc = i4_temp_poc; } ps_cur_slice->u1_mbaff_frame_flag = ps_seq->u1_mb_aff_flag && (!u1_field_pic_flag); ps_dec->ps_cur_pic->u1_picturetype |= (ps_cur_slice->u1_mbaff_frame_flag << 2); ps_dec->ps_cur_mb_row = ps_dec->ps_nbr_mb_row; //[0]; ps_dec->ps_cur_mb_row++; //Increment by 1 ,so that left mb will always be valid ps_dec->ps_top_mb_row = ps_dec->ps_nbr_mb_row + ((ps_dec->u2_frm_wd_in_mbs + 1) << (1 - 
ps_dec->ps_cur_sps->u1_frame_mbs_only_flag)); ps_dec->ps_top_mb_row++; //Increment by 1 ,so that left mb will always be valid ps_dec->pu1_y = ps_dec->pu1_y_scratch[0]; ps_dec->pu1_u = ps_dec->pu1_u_scratch[0]; ps_dec->pu1_v = ps_dec->pu1_v_scratch[0]; ps_dec->u1_yuv_scratch_idx = 0; /* CHANGED CODE */ ps_dec->ps_mv_cur = ps_dec->s_cur_pic.ps_mv; ps_dec->ps_mv_top = ps_dec->ps_mv_top_p[0]; /* CHANGED CODE */ ps_dec->u1_mv_top_p = 0; ps_dec->u1_mb_idx = 0; /* CHANGED CODE */ ps_dec->ps_mv_left = ps_dec->s_cur_pic.ps_mv; ps_dec->pu1_yleft = 0; ps_dec->pu1_uleft = 0; ps_dec->pu1_vleft = 0; ps_dec->u1_not_wait_rec = 2; ps_dec->u2_total_mbs_coded = 0; ps_dec->i4_submb_ofst = -(SUB_BLK_SIZE); ps_dec->u4_pred_info_idx = 0; ps_dec->u4_pred_info_pkd_idx = 0; ps_dec->u4_dma_buf_idx = 0; ps_dec->ps_mv = ps_dec->s_cur_pic.ps_mv; ps_dec->ps_mv_bank_cur = ps_dec->s_cur_pic.ps_mv; ps_dec->pu1_col_zero_flag = ps_dec->s_cur_pic.pu1_col_zero_flag; ps_dec->ps_part = ps_dec->ps_parse_part_params; ps_dec->i2_prev_slice_mbx = -1; ps_dec->i2_prev_slice_mby = 0; ps_dec->u2_mv_2mb[0] = 0; ps_dec->u2_mv_2mb[1] = 0; ps_dec->u1_last_pic_not_decoded = 0; ps_dec->u2_cur_slice_num_dec_thread = 0; ps_dec->u2_cur_slice_num_bs = 0; ps_dec->u4_intra_pred_line_ofst = 0; ps_dec->pu1_cur_y_intra_pred_line = ps_dec->pu1_y_intra_pred_line; ps_dec->pu1_cur_u_intra_pred_line = ps_dec->pu1_u_intra_pred_line; ps_dec->pu1_cur_v_intra_pred_line = ps_dec->pu1_v_intra_pred_line; ps_dec->pu1_cur_y_intra_pred_line_base = ps_dec->pu1_y_intra_pred_line; ps_dec->pu1_cur_u_intra_pred_line_base = ps_dec->pu1_u_intra_pred_line; ps_dec->pu1_cur_v_intra_pred_line_base = ps_dec->pu1_v_intra_pred_line; ps_dec->pu1_prev_y_intra_pred_line = ps_dec->pu1_y_intra_pred_line + (ps_dec->u2_frm_wd_in_mbs * MB_SIZE); ps_dec->pu1_prev_u_intra_pred_line = ps_dec->pu1_u_intra_pred_line + ps_dec->u2_frm_wd_in_mbs * BLK8x8SIZE * YUV420SP_FACTOR; ps_dec->pu1_prev_v_intra_pred_line = ps_dec->pu1_v_intra_pred_line + ps_dec->u2_frm_wd_in_mbs * BLK8x8SIZE; ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic; ps_dec->ps_deblk_mbn_curr = ps_dec->ps_deblk_mbn; ps_dec->ps_deblk_mbn_prev = ps_dec->ps_deblk_mbn + ps_dec->u1_recon_mb_grp; /* Initialize The Function Pointer Depending Upon the Entropy and MbAff Flag */ { if(ps_cur_slice->u1_mbaff_frame_flag) { ps_dec->pf_compute_bs = ih264d_compute_bs_mbaff; ps_dec->pf_mvpred = ih264d_mvpred_mbaff; } else { ps_dec->pf_compute_bs = ih264d_compute_bs_non_mbaff; ps_dec->u1_cur_mb_fld_dec_flag = ps_cur_slice->u1_field_pic_flag; } } /* Set up the Parameter for DMA transfer */ { UWORD8 u1_field_pic_flag = ps_dec->ps_cur_slice->u1_field_pic_flag; UWORD8 u1_mbaff = ps_cur_slice->u1_mbaff_frame_flag; UWORD8 uc_lastmbs = (((ps_dec->u2_pic_wd) >> 4) % (ps_dec->u1_recon_mb_grp >> u1_mbaff)); UWORD16 ui16_lastmbs_widthY = (uc_lastmbs ? (uc_lastmbs << 4) : ((ps_dec->u1_recon_mb_grp >> u1_mbaff) << 4)); UWORD16 ui16_lastmbs_widthUV = uc_lastmbs ? 
(uc_lastmbs << 3) : ((ps_dec->u1_recon_mb_grp >> u1_mbaff) << 3); ps_dec->s_tran_addrecon.pu1_dest_y = ps_dec->s_cur_pic.pu1_buf1; ps_dec->s_tran_addrecon.pu1_dest_u = ps_dec->s_cur_pic.pu1_buf2; ps_dec->s_tran_addrecon.pu1_dest_v = ps_dec->s_cur_pic.pu1_buf3; ps_dec->s_tran_addrecon.u2_frm_wd_y = ps_dec->u2_frm_wd_y << u1_field_pic_flag; ps_dec->s_tran_addrecon.u2_frm_wd_uv = ps_dec->u2_frm_wd_uv << u1_field_pic_flag; if(u1_field_pic_flag) { ui16_lastmbs_widthY += ps_dec->u2_frm_wd_y; ui16_lastmbs_widthUV += ps_dec->u2_frm_wd_uv; } /* Normal Increment of Pointer */ ps_dec->s_tran_addrecon.u4_inc_y[0] = ((ps_dec->u1_recon_mb_grp << 4) >> u1_mbaff); ps_dec->s_tran_addrecon.u4_inc_uv[0] = ((ps_dec->u1_recon_mb_grp << 4) >> u1_mbaff); /* End of Row Increment */ ps_dec->s_tran_addrecon.u4_inc_y[1] = (ui16_lastmbs_widthY + (PAD_LEN_Y_H << 1) + ps_dec->s_tran_addrecon.u2_frm_wd_y * ((15 << u1_mbaff) + u1_mbaff)); ps_dec->s_tran_addrecon.u4_inc_uv[1] = (ui16_lastmbs_widthUV + (PAD_LEN_UV_H << 2) + ps_dec->s_tran_addrecon.u2_frm_wd_uv * ((15 << u1_mbaff) + u1_mbaff)); /* Assign picture numbers to each frame/field */ /* only once per picture. */ ih264d_assign_pic_num(ps_dec); ps_dec->s_tran_addrecon.u2_mv_top_left_inc = (ps_dec->u1_recon_mb_grp << 2) - 1 - (u1_mbaff << 2); ps_dec->s_tran_addrecon.u2_mv_left_inc = ((ps_dec->u1_recon_mb_grp >> u1_mbaff) - 1) << (4 + u1_mbaff); } /**********************************************************************/ /* High profile related initialization at pictrue level */ /**********************************************************************/ if(ps_seq->u1_profile_idc == HIGH_PROFILE_IDC) { if((ps_seq->i4_seq_scaling_matrix_present_flag) || (ps_pps->i4_pic_scaling_matrix_present_flag)) { ih264d_form_scaling_matrix_picture(ps_seq, ps_pps, ps_dec); ps_dec->s_high_profile.u1_scaling_present = 1; } else { ih264d_form_default_scaling_matrix(ps_dec); } if(ps_pps->i4_transform_8x8_mode_flag) { ps_dec->s_high_profile.u1_transform8x8_present = 1; } } else { ih264d_form_default_scaling_matrix(ps_dec); } /* required while reading the transform_size_8x8 u4_flag */ ps_dec->s_high_profile.u1_direct_8x8_inference_flag = ps_seq->u1_direct_8x8_inference_flag; ps_dec->s_high_profile.s_cavlc_ctxt = ps_dec->s_cavlc_ctxt; ps_dec->i1_recon_in_thread3_flag = 1; ps_dec->ps_frame_buf_ip_recon = &ps_dec->s_tran_addrecon; if(ps_dec->u1_separate_parse) { memcpy(&ps_dec->s_tran_addrecon_parse, &ps_dec->s_tran_addrecon, sizeof(tfr_ctxt_t)); if(ps_dec->u4_num_cores >= 3 && ps_dec->i1_recon_in_thread3_flag) { memcpy(&ps_dec->s_tran_iprecon, &ps_dec->s_tran_addrecon, sizeof(tfr_ctxt_t)); ps_dec->ps_frame_buf_ip_recon = &ps_dec->s_tran_iprecon; } } ih264d_init_deblk_tfr_ctxt(ps_dec,&(ps_dec->s_pad_mgr), &(ps_dec->s_tran_addrecon), ps_dec->u2_frm_wd_in_mbs, 0); ps_dec->ps_cur_deblk_mb = ps_dec->ps_deblk_pic; ps_dec->u4_cur_deblk_mb_num = 0; ps_dec->u4_deblk_mb_x = 0; ps_dec->u4_deblk_mb_y = 0; ps_dec->pu4_wt_ofsts = ps_dec->pu4_wts_ofsts_mat; H264_MUTEX_UNLOCK(&ps_dec->process_disp_mutex); return OK; }
174,048
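The patch above works by giving every picture a safe default reference before the first slice is parsed, so a corrupted first I-slice cannot leave the reference lists pointing at stale memory. Below is a minimal sketch of that defensive idea only, using invented types (pic_t, dpb_t) and field names rather than libavc's actual structures:

#include <stddef.h>

#define MAX_REFS 16

typedef struct {
    unsigned char *luma;
    unsigned char *chroma;
} pic_t;

typedef struct {
    /* Reference picture lists; entries can be read by later stages even
     * when a corrupt slice prevented the real list construction, so they
     * must never be left uninitialized. */
    pic_t *ref_list0[MAX_REFS];
    pic_t *ref_list1[MAX_REFS];
} dpb_t;

/* Point every reference slot at the picture currently being decoded
 * before the first slice is parsed.  If the slice data is corrupt and
 * the proper reference list is never built, a motion-compensated read
 * still lands inside a valid, owned buffer instead of following a
 * stale or NULL pointer. */
static void dpb_init_defaults(dpb_t *dpb, pic_t *cur_pic)
{
    for (size_t i = 0; i < MAX_REFS; i++) {
        dpb->ref_list0[i] = cur_pic;
        dpb->ref_list1[i] = cur_pic;
    }
}

The real decoder copies the current picture into the first frame and field entries of ps_init_dpb; the sketch keeps only the pointer-defaulting aspect of that change.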
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code:
mcs_parse_domain_params(STREAM s)
{
	int length;

	ber_parse_header(s, MCS_TAG_DOMAIN_PARAMS, &length);
	in_uint8s(s, length);

	return s_check(s);
}

Commit Message: Malicious RDP server security fixes

This commit includes fixes for a set of 21 vulnerabilities in rdesktop when a malicious RDP server is used. All vulnerabilities were identified and reported by Eyal Itkin.

* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182

CWE ID: CWE-119
mcs_parse_domain_params(STREAM s)
{
	uint32 length;
	struct stream packet = *s;

	ber_parse_header(s, MCS_TAG_DOMAIN_PARAMS, &length);

	if (!s_check_rem(s, length))
	{
		rdp_protocol_error("mcs_parse_domain_params(), consume domain params from stream would overrun", &packet);
	}

	in_uint8s(s, length);
	return s_check(s);
}
169,799
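Most of the CVEs listed in this commit message are fixed with the same pattern: prove that a length read from the network still fits in the unread part of the stream before consuming it. A rough standalone sketch of that check, with a simplified stream_t standing in for rdesktop's STREAM type and invented helper names:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

typedef struct {
    const uint8_t *data; /* start of the buffer          */
    const uint8_t *p;    /* current read position        */
    const uint8_t *end;  /* one past the last valid byte */
} stream_t;

/* Number of bytes that can still be read without overrunning. */
static size_t stream_remaining(const stream_t *s)
{
    return (size_t)(s->end - s->p);
}

/* Skip 'len' bytes, but only after proving they are actually present.
 * Returning false instead of silently clamping lets the caller treat a
 * short buffer as a protocol error, which is what the rdesktop patch
 * does via rdp_protocol_error(). */
static bool stream_skip(stream_t *s, uint32_t len)
{
    if (len > stream_remaining(s))
        return false;   /* attacker-controlled length is too large */
    s->p += len;
    return true;
}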
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { int i; if (old->curframe != cur->curframe) return false; /* for states to be equal callsites have to be the same * and all frame states need to be equivalent */ for (i = 0; i <= old->curframe; i++) { if (old->frame[i]->callsite != cur->frame[i]->callsite) return false; if (!func_states_equal(old->frame[i], cur->frame[i])) return false; } return true; } Commit Message: bpf: prevent out of bounds speculation on pointer arithmetic Jann reported that the original commit back in b2157399cc98 ("bpf: prevent out-of-bounds speculation") was not sufficient to stop CPU from speculating out of bounds memory access: While b2157399cc98 only focussed on masking array map access for unprivileged users for tail calls and data access such that the user provided index gets sanitized from BPF program and syscall side, there is still a more generic form affected from BPF programs that applies to most maps that hold user data in relation to dynamic map access when dealing with unknown scalars or "slow" known scalars as access offset, for example: - Load a map value pointer into R6 - Load an index into R7 - Do a slow computation (e.g. with a memory dependency) that loads a limit into R8 (e.g. load the limit from a map for high latency, then mask it to make the verifier happy) - Exit if R7 >= R8 (mispredicted branch) - Load R0 = R6[R7] - Load R0 = R6[R0] For unknown scalars there are two options in the BPF verifier where we could derive knowledge from in order to guarantee safe access to the memory: i) While </>/<=/>= variants won't allow to derive any lower or upper bounds from the unknown scalar where it would be safe to add it to the map value pointer, it is possible through ==/!= test however. ii) another option is to transform the unknown scalar into a known scalar, for example, through ALU ops combination such as R &= <imm> followed by R |= <imm> or any similar combination where the original information from the unknown scalar would be destroyed entirely leaving R with a constant. The initial slow load still precedes the latter ALU ops on that register, so the CPU executes speculatively from that point. Once we have the known scalar, any compare operation would work then. A third option only involving registers with known scalars could be crafted as described in [0] where a CPU port (e.g. Slow Int unit) would be filled with many dependent computations such that the subsequent condition depending on its outcome has to wait for evaluation on its execution port and thereby executing speculatively if the speculated code can be scheduled on a different execution port, or any other form of mistraining as described in [1], for example. Given this is not limited to only unknown scalars, not only map but also stack access is affected since both is accessible for unprivileged users and could potentially be used for out of bounds access under speculation. In order to prevent any of these cases, the verifier is now sanitizing pointer arithmetic on the offset such that any out of bounds speculation would be masked in a way where the pointer arithmetic result in the destination register will stay unchanged, meaning offset masked into zero similar as in array_index_nospec() case. With regards to implementation, there are three options that were considered: i) new insn for sanitation, ii) push/pop insn and sanitation as inlined BPF, iii) reuse of ax register and sanitation as inlined BPF. 
Option i) has the downside that we end up using from reserved bits in the opcode space, but also that we would require each JIT to emit masking as native arch opcodes meaning mitigation would have slow adoption till everyone implements it eventually which is counter-productive. Option ii) and iii) have both in common that a temporary register is needed in order to implement the sanitation as inlined BPF since we are not allowed to modify the source register. While a push / pop insn in ii) would be useful to have in any case, it requires once again that every JIT needs to implement it first. While possible, amount of changes needed would also be unsuitable for a -stable patch. Therefore, the path which has fewer changes, less BPF instructions for the mitigation and does not require anything to be changed in the JITs is option iii) which this work is pursuing. The ax register is already mapped to a register in all JITs (modulo arm32 where it's mapped to stack as various other BPF registers there) and used in constant blinding for JITs-only so far. It can be reused for verifier rewrites under certain constraints. The interpreter's tmp "register" has therefore been remapped into extending the register set with hidden ax register and reusing that for a number of instructions that needed the prior temporary variable internally (e.g. div, mod). This allows for zero increase in stack space usage in the interpreter, and enables (restricted) generic use in rewrites otherwise as long as such a patchlet does not make use of these instructions. The sanitation mask is dynamic and relative to the offset the map value or stack pointer currently holds. There are various cases that need to be taken under consideration for the masking, e.g. such operation could look as follows: ptr += val or val += ptr or ptr -= val. Thus, the value to be sanitized could reside either in source or in destination register, and the limit is different depending on whether the ALU op is addition or subtraction and depending on the current known and bounded offset. The limit is derived as follows: limit := max_value_size - (smin_value + off). For subtraction: limit := umax_value + off. This holds because we do not allow any pointer arithmetic that would temporarily go out of bounds or would have an unknown value with mixed signed bounds where it is unclear at verification time whether the actual runtime value would be either negative or positive. For example, we have a derived map pointer value with constant offset and bounded one, so limit based on smin_value works because the verifier requires that statically analyzed arithmetic on the pointer must be in bounds, and thus it checks if resulting smin_value + off and umax_value + off is still within map value bounds at time of arithmetic in addition to time of access. Similarly, for the case of stack access we derive the limit as follows: MAX_BPF_STACK + off for subtraction and -off for the case of addition where off := ptr_reg->off + ptr_reg->var_off.value. Subtraction is a special case for the masking which can be in form of ptr += -val, ptr -= -val, or ptr -= val. In the first two cases where we know that the value is negative, we need to temporarily negate the value in order to do the sanitation on a positive value where we later swap the ALU op, and restore original source register if the value was in source. The sanitation of pointer arithmetic alone is still not fully sufficient as is, since a scenario like the following could happen ... PTR += 0x1000 (e.g. 
K-based imm) PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON PTR += 0x1000 PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON [...] ... which under speculation could end up as ... PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] [...] ... and therefore still access out of bounds. To prevent such case, the verifier is also analyzing safety for potential out of bounds access under speculative execution. Meaning, it is also simulating pointer access under truncation. We therefore "branch off" and push the current verification state after the ALU operation with known 0 to the verification stack for later analysis. Given the current path analysis succeeded it is likely that the one under speculation can be pruned. In any case, it is also subject to existing complexity limits and therefore anything beyond this point will be rejected. In terms of pruning, it needs to be ensured that the verification state from speculative execution simulation must never prune a non-speculative execution path, therefore, we mark verifier state accordingly at the time of push_stack(). If verifier detects out of bounds access under speculative execution from one of the possible paths that includes a truncation, it will reject such program. Given we mask every reg-based pointer arithmetic for unprivileged programs, we've been looking into how it could affect real-world programs in terms of size increase. As the majority of programs are targeted for privileged-only use case, we've unconditionally enabled masking (with its alu restrictions on top of it) for privileged programs for the sake of testing in order to check i) whether they get rejected in its current form, and ii) by how much the number of instructions and size will increase. We've tested this by using Katran, Cilium and test_l4lb from the kernel selftests. For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb we've used test_l4lb.o as well as test_l4lb_noinline.o. We found that none of the programs got rejected by the verifier with this change, and that impact is rather minimal to none. balancer_kern.o had 13,904 bytes (1,738 insns) xlated and 7,797 bytes JITed before and after the change. Most complex program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated and 18,538 bytes JITed before and after and none of the other tail call programs in bpf_lxc.o had any changes either. For the older bpf_lxc_opt_-DUNKNOWN.o object we found a small increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed after the change. Other programs from that object file had similar small increase. Both test_l4lb.o had no change and remained at 6,544 bytes (817 insns) xlated and 3,401 bytes JITed and for test_l4lb_noinline.o constant at 5,080 bytes (634 insns) xlated and 3,313 bytes JITed. This can be explained in that LLVM typically optimizes stack based pointer arithmetic by using K-based operations and that use of dynamic map access is not overly frequent. However, in future we may decide to optimize the algorithm further under known guarantees from branch and value speculation. Latter seems also unclear in terms of prediction heuristics that today's CPUs apply as well as whether there could be collisions in e.g. the predictor's Value History/Pattern Table for triggering out of bounds access, thus masking is performed unconditionally at this point but could be subject to relaxation later on. 
We were generally also brainstorming various other approaches for mitigation, but the blocker was always lack of available registers at runtime and/or overhead for runtime tracking of limits belonging to a specific pointer. Thus, we found this to be minimally intrusive under given constraints. With that in place, a simple example with sanitized access on unprivileged load at post-verification time looks as follows: # bpftool prog dump xlated id 282 [...] 28: (79) r1 = *(u64 *)(r7 +0) 29: (79) r2 = *(u64 *)(r7 +8) 30: (57) r1 &= 15 31: (79) r3 = *(u64 *)(r0 +4608) 32: (57) r3 &= 1 33: (47) r3 |= 1 34: (2d) if r2 > r3 goto pc+19 35: (b4) (u32) r11 = (u32) 20479 | 36: (1f) r11 -= r2 | Dynamic sanitation for pointer 37: (4f) r11 |= r2 | arithmetic with registers 38: (87) r11 = -r11 | containing bounded or known 39: (c7) r11 s>>= 63 | scalars in order to prevent 40: (5f) r11 &= r2 | out of bounds speculation. 41: (0f) r4 += r11 | 42: (71) r4 = *(u8 *)(r4 +0) 43: (6f) r4 <<= r1 [...] For the case where the scalar sits in the destination register as opposed to the source register, the following code is emitted for the above example: [...] 16: (b4) (u32) r11 = (u32) 20479 17: (1f) r11 -= r2 18: (4f) r11 |= r2 19: (87) r11 = -r11 20: (c7) r11 s>>= 63 21: (5f) r2 &= r11 22: (0f) r2 += r0 23: (61) r0 = *(u32 *)(r2 +0) [...] JIT blinding example with non-conflicting use of r10: [...] d5: je 0x0000000000000106 _ d7: mov 0x0(%rax),%edi | da: mov $0xf153246,%r10d | Index load from map value and e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f. e7: and %r10,%rdi |_ ea: mov $0x2f,%r10d | f0: sub %rdi,%r10 | Sanitized addition. Both use r10 f3: or %rdi,%r10 | but do not interfere with each f6: neg %r10 | other. (Neither do these instructions f9: sar $0x3f,%r10 | interfere with the use of ax as temp fd: and %r10,%rdi | in interpreter.) 100: add %rax,%rdi |_ 103: mov 0x0(%rdi),%eax [...] Tested that it fixes Jann's reproducer, and also checked that test_verifier and test_progs suite with interpreter, JIT and JIT with hardening enabled on x86-64 and arm64 runs successfully. [0] Speculose: Analyzing the Security Implications of Speculative Execution in CPUs, Giorgi Maisuradze and Christian Rossow, https://arxiv.org/pdf/1801.04084.pdf [1] A Systematic Evaluation of Transient Execution Attacks and Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz, Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens, Dmitry Evtyushkin, Daniel Gruss, https://arxiv.org/pdf/1811.05441.pdf Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation") Reported-by: Jann Horn <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Alexei Starovoitov <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]> CWE ID: CWE-189
static bool states_equal(struct bpf_verifier_env *env,
			 struct bpf_verifier_state *old,
			 struct bpf_verifier_state *cur)
{
	int i;

	if (old->curframe != cur->curframe)
		return false;

	/* Verification state from speculative execution simulation
	 * must never prune a non-speculative execution one.
	 */
	if (old->speculative && !cur->speculative)
		return false;

	/* for states to be equal callsites have to be the same
	 * and all frame states need to be equivalent
	 */
	for (i = 0; i <= old->curframe; i++) {
		if (old->frame[i]->callsite != cur->frame[i]->callsite)
			return false;

		if (!func_states_equal(old->frame[i], cur->frame[i]))
			return false;
	}
	return true;
}
170,245
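The commit message describes the runtime mitigation as a branchless mask that forces an out-of-range offset to zero even when a bounds check is mispredicted. The snippet below is a user-space illustration of that masking arithmetic, not the in-kernel verifier rewrite; it assumes an arithmetic right shift for signed values (as the emitted BPF sequence does) and index/limit values well below 2^63:

#include <stdint.h>
#include <stdio.h>

/* Branchless clamp in the spirit of the verifier patchlet: when idx is
 * inside [0, limit) the mask is all ones and idx passes through
 * unchanged; when idx >= limit the mask is zero and the result is 0,
 * so even a mispredicted bounds check cannot steer a pointer out of
 * range during speculation. */
static uint64_t mask_index(uint64_t idx, uint64_t limit)
{
    /* (idx - limit) has its top bit set exactly when idx < limit;
     * the arithmetic shift smears that bit into a full-width mask. */
    uint64_t mask = (uint64_t)((int64_t)(idx - limit) >> 63);
    return idx & mask;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)mask_index(3, 16));  /* prints 3 */
    printf("%llu\n", (unsigned long long)mask_index(20, 16)); /* prints 0 */
    return 0;
}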
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code:
MagickExport MagickBooleanType SetQuantumDepth(const Image *image,
  QuantumInfo *quantum_info,const size_t depth)
{
  size_t
    extent,
    quantum;

  /*
    Allocate the quantum pixel buffer.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(quantum_info != (QuantumInfo *) NULL);
  assert(quantum_info->signature == MagickSignature);
  quantum_info->depth=depth;
  if (quantum_info->format == FloatingPointQuantumFormat)
    {
      if (quantum_info->depth > 32)
        quantum_info->depth=64;
      else if (quantum_info->depth > 16)
        quantum_info->depth=32;
      else
        quantum_info->depth=16;
    }
  if (quantum_info->pixels != (unsigned char **) NULL)
    DestroyQuantumPixels(quantum_info);
  quantum=(quantum_info->pad+6)*(quantum_info->depth+7)/8;
  extent=image->columns*quantum;
  if (quantum != (extent/image->columns))
    return(MagickFalse);
  return(AcquireQuantumPixels(quantum_info,extent));
}

Commit Message: https://github.com/ImageMagick/ImageMagick/issues/110

CWE ID: CWE-369
MagickExport MagickBooleanType SetQuantumDepth(const Image *image,
  QuantumInfo *quantum_info,const size_t depth)
{
  size_t
    extent,
    quantum;

  /*
    Allocate the quantum pixel buffer.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(quantum_info != (QuantumInfo *) NULL);
  assert(quantum_info->signature == MagickSignature);
  quantum_info->depth=depth;
  if (quantum_info->format == FloatingPointQuantumFormat)
    {
      if (quantum_info->depth > 32)
        quantum_info->depth=64;
      else if (quantum_info->depth > 16)
        quantum_info->depth=32;
      else
        quantum_info->depth=16;
    }
  if (quantum_info->pixels != (unsigned char **) NULL)
    DestroyQuantumPixels(quantum_info);
  quantum=(quantum_info->pad+6)*(quantum_info->depth+7)/8;
  extent=image->columns*quantum;
  if ((image->columns != 0) && (quantum != (extent/image->columns)))
    return(MagickFalse);
  return(AcquireQuantumPixels(quantum_info,extent));
}
170,113
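The added condition does two things at once: it avoids dividing by zero when image->columns is 0, and it detects overflow of columns*quantum by dividing the product back. A small self-contained sketch of that overflow-safe size computation, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

/* Compute columns * quantum into *out, reporting failure instead of
 * silently wrapping.  The divide-back test is only meaningful when
 * columns is non-zero, which is exactly the extra condition the
 * ImageMagick patch adds before the division; columns == 0 yields an
 * extent of 0 and cannot overflow. */
static bool checked_extent(size_t columns, size_t quantum, size_t *out)
{
    size_t extent = columns * quantum;

    if (columns != 0 && quantum != extent / columns)
        return false;   /* columns * quantum overflowed size_t */

    *out = extent;
    return true;
}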
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int chmd_read_headers(struct mspack_system *sys, struct mspack_file *fh, struct mschmd_header *chm, int entire) { unsigned int section, name_len, x, errors, num_chunks; unsigned char buf[0x54], *chunk = NULL, *name, *p, *end; struct mschmd_file *fi, *link = NULL; off_t offset, length; int num_entries; /* initialise pointers */ chm->files = NULL; chm->sysfiles = NULL; chm->chunk_cache = NULL; chm->sec0.base.chm = chm; chm->sec0.base.id = 0; chm->sec1.base.chm = chm; chm->sec1.base.id = 1; chm->sec1.content = NULL; chm->sec1.control = NULL; chm->sec1.spaninfo = NULL; chm->sec1.rtable = NULL; /* read the first header */ if (sys->read(fh, &buf[0], chmhead_SIZEOF) != chmhead_SIZEOF) { return MSPACK_ERR_READ; } /* check ITSF signature */ if (EndGetI32(&buf[chmhead_Signature]) != 0x46535449) { return MSPACK_ERR_SIGNATURE; } /* check both header GUIDs */ if (mspack_memcmp(&buf[chmhead_GUID1], &guids[0], 32L) != 0) { D(("incorrect GUIDs")) return MSPACK_ERR_SIGNATURE; } chm->version = EndGetI32(&buf[chmhead_Version]); chm->timestamp = EndGetM32(&buf[chmhead_Timestamp]); chm->language = EndGetI32(&buf[chmhead_LanguageID]); if (chm->version > 3) { sys->message(fh, "WARNING; CHM version > 3"); } /* read the header section table */ if (sys->read(fh, &buf[0], chmhst3_SIZEOF) != chmhst3_SIZEOF) { return MSPACK_ERR_READ; } /* chmhst3_OffsetCS0 does not exist in version 1 or 2 CHM files. * The offset will be corrected later, once HS1 is read. */ if (read_off64(&offset, &buf[chmhst_OffsetHS0], sys, fh) || read_off64(&chm->dir_offset, &buf[chmhst_OffsetHS1], sys, fh) || read_off64(&chm->sec0.offset, &buf[chmhst3_OffsetCS0], sys, fh)) { return MSPACK_ERR_DATAFORMAT; } /* seek to header section 0 */ if (sys->seek(fh, offset, MSPACK_SYS_SEEK_START)) { return MSPACK_ERR_SEEK; } /* read header section 0 */ if (sys->read(fh, &buf[0], chmhs0_SIZEOF) != chmhs0_SIZEOF) { return MSPACK_ERR_READ; } if (read_off64(&chm->length, &buf[chmhs0_FileLen], sys, fh)) { return MSPACK_ERR_DATAFORMAT; } /* seek to header section 1 */ if (sys->seek(fh, chm->dir_offset, MSPACK_SYS_SEEK_START)) { return MSPACK_ERR_SEEK; } /* read header section 1 */ if (sys->read(fh, &buf[0], chmhs1_SIZEOF) != chmhs1_SIZEOF) { return MSPACK_ERR_READ; } chm->dir_offset = sys->tell(fh); chm->chunk_size = EndGetI32(&buf[chmhs1_ChunkSize]); chm->density = EndGetI32(&buf[chmhs1_Density]); chm->depth = EndGetI32(&buf[chmhs1_Depth]); chm->index_root = EndGetI32(&buf[chmhs1_IndexRoot]); chm->num_chunks = EndGetI32(&buf[chmhs1_NumChunks]); chm->first_pmgl = EndGetI32(&buf[chmhs1_FirstPMGL]); chm->last_pmgl = EndGetI32(&buf[chmhs1_LastPMGL]); if (chm->version < 3) { /* versions before 3 don't have chmhst3_OffsetCS0 */ chm->sec0.offset = chm->dir_offset + (chm->chunk_size * chm->num_chunks); } /* check if content offset or file size is wrong */ if (chm->sec0.offset > chm->length) { D(("content section begins after file has ended")) return MSPACK_ERR_DATAFORMAT; } /* ensure there are chunks and that chunk size is * large enough for signature and num_entries */ if (chm->chunk_size < (pmgl_Entries + 2)) { D(("chunk size not large enough")) return MSPACK_ERR_DATAFORMAT; } if (chm->num_chunks == 0) { D(("no chunks")) return MSPACK_ERR_DATAFORMAT; } /* The chunk_cache data structure is not great; large values for num_chunks * or num_chunks*chunk_size can exhaust all memory. Until a better chunk * cache is implemented, put arbitrary limits on num_chunks and chunk size. 
*/ if (chm->num_chunks > 100000) { D(("more than 100,000 chunks")) return MSPACK_ERR_DATAFORMAT; } if ((off_t)chm->chunk_size * (off_t)chm->num_chunks > chm->length) { D(("chunks larger than entire file")) return MSPACK_ERR_DATAFORMAT; } /* common sense checks on header section 1 fields */ if ((chm->chunk_size & (chm->chunk_size - 1)) != 0) { sys->message(fh, "WARNING; chunk size is not a power of two"); } if (chm->first_pmgl != 0) { sys->message(fh, "WARNING; first PMGL chunk is not zero"); } if (chm->first_pmgl > chm->last_pmgl) { D(("first pmgl chunk is after last pmgl chunk")) return MSPACK_ERR_DATAFORMAT; } if (chm->index_root != 0xFFFFFFFF && chm->index_root > chm->num_chunks) { D(("index_root outside valid range")) return MSPACK_ERR_DATAFORMAT; } /* if we are doing a quick read, stop here! */ if (!entire) { return MSPACK_ERR_OK; } /* seek to the first PMGL chunk, and reduce the number of chunks to read */ if ((x = chm->first_pmgl) != 0) { if (sys->seek(fh,(off_t) (x * chm->chunk_size), MSPACK_SYS_SEEK_CUR)) { return MSPACK_ERR_SEEK; } } num_chunks = chm->last_pmgl - x + 1; if (!(chunk = (unsigned char *) sys->alloc(sys, (size_t)chm->chunk_size))) { return MSPACK_ERR_NOMEMORY; } /* read and process all chunks from FirstPMGL to LastPMGL */ errors = 0; while (num_chunks--) { /* read next chunk */ if (sys->read(fh, chunk, (int)chm->chunk_size) != (int)chm->chunk_size) { sys->free(chunk); return MSPACK_ERR_READ; } /* process only directory (PMGL) chunks */ if (EndGetI32(&chunk[pmgl_Signature]) != 0x4C474D50) continue; if (EndGetI32(&chunk[pmgl_QuickRefSize]) < 2) { sys->message(fh, "WARNING; PMGL quickref area is too small"); } if (EndGetI32(&chunk[pmgl_QuickRefSize]) > ((int)chm->chunk_size - pmgl_Entries)) { sys->message(fh, "WARNING; PMGL quickref area is too large"); } p = &chunk[pmgl_Entries]; end = &chunk[chm->chunk_size - 2]; num_entries = EndGetI16(end); while (num_entries--) { READ_ENCINT(name_len); if (name_len > (unsigned int) (end - p)) goto chunk_end; name = p; p += name_len; READ_ENCINT(section); READ_ENCINT(offset); READ_ENCINT(length); /* empty files and directory names are stored as a file entry at * offset 0 with length 0. We want to keep empty files, but not * directory names, which end with a "/" */ if ((offset == 0) && (length == 0)) { if ((name_len > 0) && (name[name_len-1] == '/')) continue; } if (section > 1) { sys->message(fh, "invalid section number '%u'.", section); continue; } if (!(fi = (struct mschmd_file *) sys->alloc(sys, sizeof(struct mschmd_file) + name_len + 1))) { sys->free(chunk); return MSPACK_ERR_NOMEMORY; } fi->next = NULL; fi->filename = (char *) &fi[1]; fi->section = ((section == 0) ? 
(struct mschmd_section *) (&chm->sec0) : (struct mschmd_section *) (&chm->sec1)); fi->offset = offset; fi->length = length; sys->copy(name, fi->filename, (size_t) name_len); fi->filename[name_len] = '\0'; if (name[0] == ':' && name[1] == ':') { /* system file */ if (mspack_memcmp(&name[2], &content_name[2], 31L) == 0) { if (mspack_memcmp(&name[33], &content_name[33], 8L) == 0) { chm->sec1.content = fi; } else if (mspack_memcmp(&name[33], &control_name[33], 11L) == 0) { chm->sec1.control = fi; } else if (mspack_memcmp(&name[33], &spaninfo_name[33], 8L) == 0) { chm->sec1.spaninfo = fi; } else if (mspack_memcmp(&name[33], &rtable_name[33], 72L) == 0) { chm->sec1.rtable = fi; } } fi->next = chm->sysfiles; chm->sysfiles = fi; } else { /* normal file */ if (link) link->next = fi; else chm->files = fi; link = fi; } } /* this is reached either when num_entries runs out, or if * reading data from the chunk reached a premature end of chunk */ chunk_end: if (num_entries >= 0) { D(("chunk ended before all entries could be read")) errors++; } } sys->free(chunk); return (errors > 0) ? MSPACK_ERR_DATAFORMAT : MSPACK_ERR_OK; } Commit Message: Fix off-by-one bounds check on CHM PMGI/PMGL chunk numbers and reject empty filenames. Thanks to Hanno Böck for reporting CWE ID: CWE-20
static int chmd_read_headers(struct mspack_system *sys, struct mspack_file *fh, struct mschmd_header *chm, int entire) { unsigned int section, name_len, x, errors, num_chunks; unsigned char buf[0x54], *chunk = NULL, *name, *p, *end; struct mschmd_file *fi, *link = NULL; off_t offset, length; int num_entries; /* initialise pointers */ chm->files = NULL; chm->sysfiles = NULL; chm->chunk_cache = NULL; chm->sec0.base.chm = chm; chm->sec0.base.id = 0; chm->sec1.base.chm = chm; chm->sec1.base.id = 1; chm->sec1.content = NULL; chm->sec1.control = NULL; chm->sec1.spaninfo = NULL; chm->sec1.rtable = NULL; /* read the first header */ if (sys->read(fh, &buf[0], chmhead_SIZEOF) != chmhead_SIZEOF) { return MSPACK_ERR_READ; } /* check ITSF signature */ if (EndGetI32(&buf[chmhead_Signature]) != 0x46535449) { return MSPACK_ERR_SIGNATURE; } /* check both header GUIDs */ if (mspack_memcmp(&buf[chmhead_GUID1], &guids[0], 32L) != 0) { D(("incorrect GUIDs")) return MSPACK_ERR_SIGNATURE; } chm->version = EndGetI32(&buf[chmhead_Version]); chm->timestamp = EndGetM32(&buf[chmhead_Timestamp]); chm->language = EndGetI32(&buf[chmhead_LanguageID]); if (chm->version > 3) { sys->message(fh, "WARNING; CHM version > 3"); } /* read the header section table */ if (sys->read(fh, &buf[0], chmhst3_SIZEOF) != chmhst3_SIZEOF) { return MSPACK_ERR_READ; } /* chmhst3_OffsetCS0 does not exist in version 1 or 2 CHM files. * The offset will be corrected later, once HS1 is read. */ if (read_off64(&offset, &buf[chmhst_OffsetHS0], sys, fh) || read_off64(&chm->dir_offset, &buf[chmhst_OffsetHS1], sys, fh) || read_off64(&chm->sec0.offset, &buf[chmhst3_OffsetCS0], sys, fh)) { return MSPACK_ERR_DATAFORMAT; } /* seek to header section 0 */ if (sys->seek(fh, offset, MSPACK_SYS_SEEK_START)) { return MSPACK_ERR_SEEK; } /* read header section 0 */ if (sys->read(fh, &buf[0], chmhs0_SIZEOF) != chmhs0_SIZEOF) { return MSPACK_ERR_READ; } if (read_off64(&chm->length, &buf[chmhs0_FileLen], sys, fh)) { return MSPACK_ERR_DATAFORMAT; } /* seek to header section 1 */ if (sys->seek(fh, chm->dir_offset, MSPACK_SYS_SEEK_START)) { return MSPACK_ERR_SEEK; } /* read header section 1 */ if (sys->read(fh, &buf[0], chmhs1_SIZEOF) != chmhs1_SIZEOF) { return MSPACK_ERR_READ; } chm->dir_offset = sys->tell(fh); chm->chunk_size = EndGetI32(&buf[chmhs1_ChunkSize]); chm->density = EndGetI32(&buf[chmhs1_Density]); chm->depth = EndGetI32(&buf[chmhs1_Depth]); chm->index_root = EndGetI32(&buf[chmhs1_IndexRoot]); chm->num_chunks = EndGetI32(&buf[chmhs1_NumChunks]); chm->first_pmgl = EndGetI32(&buf[chmhs1_FirstPMGL]); chm->last_pmgl = EndGetI32(&buf[chmhs1_LastPMGL]); if (chm->version < 3) { /* versions before 3 don't have chmhst3_OffsetCS0 */ chm->sec0.offset = chm->dir_offset + (chm->chunk_size * chm->num_chunks); } /* check if content offset or file size is wrong */ if (chm->sec0.offset > chm->length) { D(("content section begins after file has ended")) return MSPACK_ERR_DATAFORMAT; } /* ensure there are chunks and that chunk size is * large enough for signature and num_entries */ if (chm->chunk_size < (pmgl_Entries + 2)) { D(("chunk size not large enough")) return MSPACK_ERR_DATAFORMAT; } if (chm->num_chunks == 0) { D(("no chunks")) return MSPACK_ERR_DATAFORMAT; } /* The chunk_cache data structure is not great; large values for num_chunks * or num_chunks*chunk_size can exhaust all memory. Until a better chunk * cache is implemented, put arbitrary limits on num_chunks and chunk size. 
*/ if (chm->num_chunks > 100000) { D(("more than 100,000 chunks")) return MSPACK_ERR_DATAFORMAT; } if ((off_t)chm->chunk_size * (off_t)chm->num_chunks > chm->length) { D(("chunks larger than entire file")) return MSPACK_ERR_DATAFORMAT; } /* common sense checks on header section 1 fields */ if ((chm->chunk_size & (chm->chunk_size - 1)) != 0) { sys->message(fh, "WARNING; chunk size is not a power of two"); } if (chm->first_pmgl != 0) { sys->message(fh, "WARNING; first PMGL chunk is not zero"); } if (chm->first_pmgl > chm->last_pmgl) { D(("first pmgl chunk is after last pmgl chunk")) return MSPACK_ERR_DATAFORMAT; } if (chm->index_root != 0xFFFFFFFF && chm->index_root >= chm->num_chunks) { D(("index_root outside valid range")) return MSPACK_ERR_DATAFORMAT; } /* if we are doing a quick read, stop here! */ if (!entire) { return MSPACK_ERR_OK; } /* seek to the first PMGL chunk, and reduce the number of chunks to read */ if ((x = chm->first_pmgl) != 0) { if (sys->seek(fh,(off_t) (x * chm->chunk_size), MSPACK_SYS_SEEK_CUR)) { return MSPACK_ERR_SEEK; } } num_chunks = chm->last_pmgl - x + 1; if (!(chunk = (unsigned char *) sys->alloc(sys, (size_t)chm->chunk_size))) { return MSPACK_ERR_NOMEMORY; } /* read and process all chunks from FirstPMGL to LastPMGL */ errors = 0; while (num_chunks--) { /* read next chunk */ if (sys->read(fh, chunk, (int)chm->chunk_size) != (int)chm->chunk_size) { sys->free(chunk); return MSPACK_ERR_READ; } /* process only directory (PMGL) chunks */ if (EndGetI32(&chunk[pmgl_Signature]) != 0x4C474D50) continue; if (EndGetI32(&chunk[pmgl_QuickRefSize]) < 2) { sys->message(fh, "WARNING; PMGL quickref area is too small"); } if (EndGetI32(&chunk[pmgl_QuickRefSize]) > ((int)chm->chunk_size - pmgl_Entries)) { sys->message(fh, "WARNING; PMGL quickref area is too large"); } p = &chunk[pmgl_Entries]; end = &chunk[chm->chunk_size - 2]; num_entries = EndGetI16(end); while (num_entries--) { READ_ENCINT(name_len); if (name_len > (unsigned int) (end - p)) goto chunk_end; /* consider blank filenames to be an error */ if (name_len == 0) goto chunk_end; name = p; p += name_len; READ_ENCINT(section); READ_ENCINT(offset); READ_ENCINT(length); /* empty files and directory names are stored as a file entry at * offset 0 with length 0. We want to keep empty files, but not * directory names, which end with a "/" */ if ((offset == 0) && (length == 0)) { if ((name_len > 0) && (name[name_len-1] == '/')) continue; } if (section > 1) { sys->message(fh, "invalid section number '%u'.", section); continue; } if (!(fi = (struct mschmd_file *) sys->alloc(sys, sizeof(struct mschmd_file) + name_len + 1))) { sys->free(chunk); return MSPACK_ERR_NOMEMORY; } fi->next = NULL; fi->filename = (char *) &fi[1]; fi->section = ((section == 0) ? 
(struct mschmd_section *) (&chm->sec0) : (struct mschmd_section *) (&chm->sec1)); fi->offset = offset; fi->length = length; sys->copy(name, fi->filename, (size_t) name_len); fi->filename[name_len] = '\0'; if (name[0] == ':' && name[1] == ':') { /* system file */ if (mspack_memcmp(&name[2], &content_name[2], 31L) == 0) { if (mspack_memcmp(&name[33], &content_name[33], 8L) == 0) { chm->sec1.content = fi; } else if (mspack_memcmp(&name[33], &control_name[33], 11L) == 0) { chm->sec1.control = fi; } else if (mspack_memcmp(&name[33], &spaninfo_name[33], 8L) == 0) { chm->sec1.spaninfo = fi; } else if (mspack_memcmp(&name[33], &rtable_name[33], 72L) == 0) { chm->sec1.rtable = fi; } } fi->next = chm->sysfiles; chm->sysfiles = fi; } else { /* normal file */ if (link) link->next = fi; else chm->files = fi; link = fi; } } /* this is reached either when num_entries runs out, or if * reading data from the chunk reached a premature end of chunk */ chunk_end: if (num_entries >= 0) { D(("chunk ended before all entries could be read")) errors++; } } sys->free(chunk); return (errors > 0) ? MSPACK_ERR_DATAFORMAT : MSPACK_ERR_OK; }
169,112
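Both fixes in this entry are one-line guards: a chunk index is valid only in the range [0, num_chunks), so the comparison must be >= rather than >, and a zero-length directory entry name is treated as corrupt input. A compact sketch of the corrected checks, with hypothetical helper names:

#include <stdbool.h>

#define CHM_NO_INDEX 0xFFFFFFFFu

/* An index of 0xFFFFFFFF means "no index chunk"; any other value must
 * address one of num_chunks chunks, i.e. lie in [0, num_chunks).
 * The vulnerable code used '>' here, which wrongly accepted
 * index_root == num_chunks, one past the last valid chunk. */
static bool index_root_valid(unsigned int index_root, unsigned int num_chunks)
{
    return index_root == CHM_NO_INDEX || index_root < num_chunks;
}

/* Directory entries with an empty name have no meaning in the format,
 * so they are rejected as corrupt rather than parsed further, and the
 * length must also fit inside the bytes left in the chunk. */
static bool name_len_valid(unsigned int name_len, unsigned int bytes_left)
{
    return name_len != 0 && name_len <= bytes_left;
}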
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code:
void BrowserPolicyConnector::DeviceStopAutoRetry() {
#if defined(OS_CHROMEOS)
  if (device_cloud_policy_subsystem_.get())
    device_cloud_policy_subsystem_->StopAutoRetry();
#endif
}

Commit Message: Reset the device policy machinery upon retrying enrollment.

BUG=chromium-os:18208
TEST=See bug description

Review URL: http://codereview.chromium.org/7676005

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@97615 0039d316-1c4b-4281-b951-d872f2087c98

CWE ID: CWE-399
void BrowserPolicyConnector::ResetDevicePolicy() {
#if defined(OS_CHROMEOS)
  if (device_cloud_policy_subsystem_.get())
    device_cloud_policy_subsystem_->Reset();
#endif
}
170,279
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int ssl_scan_clienthello_tlsext(SSL *s, PACKET *pkt, int *al) { unsigned int type; int renegotiate_seen = 0; PACKET extensions; *al = SSL_AD_DECODE_ERROR; s->servername_done = 0; s->tlsext_status_type = -1; #ifndef OPENSSL_NO_NEXTPROTONEG s->s3->next_proto_neg_seen = 0; #endif OPENSSL_free(s->s3->alpn_selected); s->s3->alpn_selected = NULL; s->s3->alpn_selected_len = 0; OPENSSL_free(s->s3->alpn_proposed); s->s3->alpn_proposed = NULL; s->s3->alpn_proposed_len = 0; #ifndef OPENSSL_NO_HEARTBEATS s->tlsext_heartbeat &= ~(SSL_DTLSEXT_HB_ENABLED | SSL_DTLSEXT_HB_DONT_SEND_REQUESTS); #endif #ifndef OPENSSL_NO_EC if (s->options & SSL_OP_SAFARI_ECDHE_ECDSA_BUG) ssl_check_for_safari(s, pkt); #endif /* !OPENSSL_NO_EC */ /* Clear any signature algorithms extension received */ OPENSSL_free(s->s3->tmp.peer_sigalgs); s->s3->tmp.peer_sigalgs = NULL; s->s3->flags &= ~TLS1_FLAGS_ENCRYPT_THEN_MAC; #ifndef OPENSSL_NO_SRP OPENSSL_free(s->srp_ctx.login); s->srp_ctx.login = NULL; #endif s->srtp_profile = NULL; if (PACKET_remaining(pkt) == 0) goto ri_check; if (!PACKET_as_length_prefixed_2(pkt, &extensions)) return 0; if (!tls1_check_duplicate_extensions(&extensions)) return 0; /* * We parse all extensions to ensure the ClientHello is well-formed but, * unless an extension specifies otherwise, we ignore extensions upon * resumption. */ while (PACKET_get_net_2(&extensions, &type)) { PACKET extension; if (!PACKET_get_length_prefixed_2(&extensions, &extension)) return 0; if (s->tlsext_debug_cb) s->tlsext_debug_cb(s, 0, type, PACKET_data(&extension), PACKET_remaining(&extension), s->tlsext_debug_arg); if (type == TLSEXT_TYPE_renegotiate) { if (!ssl_parse_clienthello_renegotiate_ext(s, &extension, al)) return 0; renegotiate_seen = 1; } else if (s->version == SSL3_VERSION) { } /*- * The servername extension is treated as follows: * * - Only the hostname type is supported with a maximum length of 255. * - The servername is rejected if too long or if it contains zeros, * in which case an fatal alert is generated. * - The servername field is maintained together with the session cache. * - When a session is resumed, the servername call back invoked in order * to allow the application to position itself to the right context. * - The servername is acknowledged if it is new for a session or when * it is identical to a previously used for the same session. * Applications can control the behaviour. They can at any time * set a 'desirable' servername for a new SSL object. This can be the * case for example with HTTPS when a Host: header field is received and * a renegotiation is requested. In this case, a possible servername * presented in the new client hello is only acknowledged if it matches * the value of the Host: field. * - Applications must use SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION * if they provide for changing an explicit servername context for the * session, i.e. when the session has been established with a servername * extension. * - On session reconnect, the servername extension may be absent. * */ else if (type == TLSEXT_TYPE_server_name) { unsigned int servname_type; PACKET sni, hostname; if (!PACKET_as_length_prefixed_2(&extension, &sni) /* ServerNameList must be at least 1 byte long. */ || PACKET_remaining(&sni) == 0) { return 0; } /* * Although the server_name extension was intended to be * extensible to new name types, RFC 4366 defined the * syntax inextensibility and OpenSSL 1.0.x parses it as * such. 
* RFC 6066 corrected the mistake but adding new name types * is nevertheless no longer feasible, so act as if no other * SNI types can exist, to simplify parsing. * * Also note that the RFC permits only one SNI value per type, * i.e., we can only have a single hostname. */ if (!PACKET_get_1(&sni, &servname_type) || servname_type != TLSEXT_NAMETYPE_host_name || !PACKET_as_length_prefixed_2(&sni, &hostname)) { return 0; } if (!s->hit) { if (PACKET_remaining(&hostname) > TLSEXT_MAXLEN_host_name) { *al = TLS1_AD_UNRECOGNIZED_NAME; return 0; } if (PACKET_contains_zero_byte(&hostname)) { *al = TLS1_AD_UNRECOGNIZED_NAME; return 0; } if (!PACKET_strndup(&hostname, &s->session->tlsext_hostname)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } s->servername_done = 1; } else { /* * TODO(openssl-team): if the SNI doesn't match, we MUST * fall back to a full handshake. */ s->servername_done = s->session->tlsext_hostname && PACKET_equal(&hostname, s->session->tlsext_hostname, strlen(s->session->tlsext_hostname)); } } #ifndef OPENSSL_NO_SRP else if (type == TLSEXT_TYPE_srp) { PACKET srp_I; if (!PACKET_as_length_prefixed_1(&extension, &srp_I)) return 0; if (PACKET_contains_zero_byte(&srp_I)) return 0; /* * TODO(openssl-team): currently, we re-authenticate the user * upon resumption. Instead, we MUST ignore the login. */ if (!PACKET_strndup(&srp_I, &s->srp_ctx.login)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } #endif #ifndef OPENSSL_NO_EC else if (type == TLSEXT_TYPE_ec_point_formats) { PACKET ec_point_format_list; if (!PACKET_as_length_prefixed_1(&extension, &ec_point_format_list) || PACKET_remaining(&ec_point_format_list) == 0) { return 0; } if (!s->hit) { if (!PACKET_memdup(&ec_point_format_list, &s->session->tlsext_ecpointformatlist, &s-> session->tlsext_ecpointformatlist_length)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } } else if (type == TLSEXT_TYPE_elliptic_curves) { PACKET elliptic_curve_list; /* Each NamedCurve is 2 bytes and we must have at least 1. 
*/ if (!PACKET_as_length_prefixed_2(&extension, &elliptic_curve_list) || PACKET_remaining(&elliptic_curve_list) == 0 || (PACKET_remaining(&elliptic_curve_list) % 2) != 0) { return 0; } if (!s->hit) { if (!PACKET_memdup(&elliptic_curve_list, &s->session->tlsext_ellipticcurvelist, &s-> session->tlsext_ellipticcurvelist_length)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } } #endif /* OPENSSL_NO_EC */ else if (type == TLSEXT_TYPE_session_ticket) { if (s->tls_session_ticket_ext_cb && !s->tls_session_ticket_ext_cb(s, PACKET_data(&extension), PACKET_remaining(&extension), s->tls_session_ticket_ext_cb_arg)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } else if (type == TLSEXT_TYPE_signature_algorithms) { PACKET supported_sig_algs; if (!PACKET_as_length_prefixed_2(&extension, &supported_sig_algs) || (PACKET_remaining(&supported_sig_algs) % 2) != 0 || PACKET_remaining(&supported_sig_algs) == 0) { return 0; } if (!s->hit) { if (!tls1_save_sigalgs(s, PACKET_data(&supported_sig_algs), PACKET_remaining(&supported_sig_algs))) { return 0; } } } else if (type == TLSEXT_TYPE_status_request) { if (!PACKET_get_1(&extension, (unsigned int *)&s->tlsext_status_type)) { return 0; } #ifndef OPENSSL_NO_OCSP if (s->tlsext_status_type == TLSEXT_STATUSTYPE_ocsp) { const unsigned char *ext_data; PACKET responder_id_list, exts; if (!PACKET_get_length_prefixed_2 (&extension, &responder_id_list)) return 0; /* * We remove any OCSP_RESPIDs from a previous handshake * to prevent unbounded memory growth - CVE-2016-6304 */ sk_OCSP_RESPID_pop_free(s->tlsext_ocsp_ids, OCSP_RESPID_free); if (PACKET_remaining(&responder_id_list) > 0) { s->tlsext_ocsp_ids = sk_OCSP_RESPID_new_null(); if (s->tlsext_ocsp_ids == NULL) { *al = SSL_AD_INTERNAL_ERROR; return 0; } } else { s->tlsext_ocsp_ids = NULL; } while (PACKET_remaining(&responder_id_list) > 0) { OCSP_RESPID *id; PACKET responder_id; const unsigned char *id_data; if (!PACKET_get_length_prefixed_2(&responder_id_list, &responder_id) || PACKET_remaining(&responder_id) == 0) { return 0; } id_data = PACKET_data(&responder_id); id = d2i_OCSP_RESPID(NULL, &id_data, PACKET_remaining(&responder_id)); if (id == NULL) return 0; if (id_data != PACKET_end(&responder_id)) { OCSP_RESPID_free(id); return 0; } if (!sk_OCSP_RESPID_push(s->tlsext_ocsp_ids, id)) { OCSP_RESPID_free(id); *al = SSL_AD_INTERNAL_ERROR; return 0; } } /* Read in request_extensions */ if (!PACKET_as_length_prefixed_2(&extension, &exts)) return 0; if (PACKET_remaining(&exts) > 0) { ext_data = PACKET_data(&exts); sk_X509_EXTENSION_pop_free(s->tlsext_ocsp_exts, X509_EXTENSION_free); s->tlsext_ocsp_exts = d2i_X509_EXTENSIONS(NULL, &ext_data, PACKET_remaining(&exts)); if (s->tlsext_ocsp_exts == NULL || ext_data != PACKET_end(&exts)) { return 0; } } } else #endif { /* * We don't know what to do with any other type so ignore it. 
*/ s->tlsext_status_type = -1; } } #ifndef OPENSSL_NO_HEARTBEATS else if (SSL_IS_DTLS(s) && type == TLSEXT_TYPE_heartbeat) { unsigned int hbtype; if (!PACKET_get_1(&extension, &hbtype) || PACKET_remaining(&extension)) { *al = SSL_AD_DECODE_ERROR; return 0; } switch (hbtype) { case 0x01: /* Client allows us to send HB requests */ s->tlsext_heartbeat |= SSL_DTLSEXT_HB_ENABLED; break; case 0x02: /* Client doesn't accept HB requests */ s->tlsext_heartbeat |= SSL_DTLSEXT_HB_ENABLED; s->tlsext_heartbeat |= SSL_DTLSEXT_HB_DONT_SEND_REQUESTS; break; default: *al = SSL_AD_ILLEGAL_PARAMETER; return 0; } } #endif #ifndef OPENSSL_NO_NEXTPROTONEG else if (type == TLSEXT_TYPE_next_proto_neg && s->s3->tmp.finish_md_len == 0) { /*- * We shouldn't accept this extension on a * renegotiation. * * s->new_session will be set on renegotiation, but we * probably shouldn't rely that it couldn't be set on * the initial renegotiation too in certain cases (when * there's some other reason to disallow resuming an * earlier session -- the current code won't be doing * anything like that, but this might change). * * A valid sign that there's been a previous handshake * in this connection is if s->s3->tmp.finish_md_len > * 0. (We are talking about a check that will happen * in the Hello protocol round, well before a new * Finished message could have been computed.) */ s->s3->next_proto_neg_seen = 1; } #endif else if (type == TLSEXT_TYPE_application_layer_protocol_negotiation && s->s3->tmp.finish_md_len == 0) { if (!tls1_alpn_handle_client_hello(s, &extension, al)) return 0; } /* session ticket processed earlier */ #ifndef OPENSSL_NO_SRTP else if (SSL_IS_DTLS(s) && SSL_get_srtp_profiles(s) && type == TLSEXT_TYPE_use_srtp) { if (ssl_parse_clienthello_use_srtp_ext(s, &extension, al)) return 0; } #endif else if (type == TLSEXT_TYPE_encrypt_then_mac) s->s3->flags |= TLS1_FLAGS_ENCRYPT_THEN_MAC; /* * Note: extended master secret extension handled in * tls_check_serverhello_tlsext_early() */ /* * If this ClientHello extension was unhandled and this is a * nonresumed connection, check whether the extension is a custom * TLS Extension (has a custom_srv_ext_record), and if so call the * callback and record the extension number so that an appropriate * ServerHello may be later returned. */ else if (!s->hit) { if (custom_ext_parse(s, 1, type, PACKET_data(&extension), PACKET_remaining(&extension), al) <= 0) return 0; } } if (PACKET_remaining(pkt) != 0) { /* * tls1_check_duplicate_extensions should ensure this never happens. */ *al = SSL_AD_INTERNAL_ERROR; return 0; } ri_check: /* Need RI if renegotiating */ if (!renegotiate_seen && s->renegotiate && !(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) { *al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL_SCAN_CLIENTHELLO_TLSEXT, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED); return 0; } /* * This function currently has no state to clean up, so it returns directly. * If parsing fails at any point, the function returns early. * The SSL object may be left with partial data from extensions, but it must * then no longer be used, and clearing it up will free the leftovers. */ return 1; } Commit Message: Don't change the state of the ETM flags until CCS processing Changing the ciphersuite during a renegotiation can result in a crash leading to a DoS attack. ETM has not been implemented in 1.1.0 for DTLS so this is TLS only. The problem is caused by changing the flag indicating whether to use ETM or not immediately on negotiation of ETM, rather than at CCS. 
Therefore, during a renegotiation, if the ETM state is changing (usually due to a change of ciphersuite), then an error/crash will occur. Due to the fact that there are separate CCS messages for read and write we actually now need two flags to determine whether to use ETM or not. CVE-2017-3733 Reviewed-by: Richard Levitte <[email protected]> CWE ID: CWE-20
static int ssl_scan_clienthello_tlsext(SSL *s, PACKET *pkt, int *al) { unsigned int type; int renegotiate_seen = 0; PACKET extensions; *al = SSL_AD_DECODE_ERROR; s->servername_done = 0; s->tlsext_status_type = -1; #ifndef OPENSSL_NO_NEXTPROTONEG s->s3->next_proto_neg_seen = 0; #endif OPENSSL_free(s->s3->alpn_selected); s->s3->alpn_selected = NULL; s->s3->alpn_selected_len = 0; OPENSSL_free(s->s3->alpn_proposed); s->s3->alpn_proposed = NULL; s->s3->alpn_proposed_len = 0; #ifndef OPENSSL_NO_HEARTBEATS s->tlsext_heartbeat &= ~(SSL_DTLSEXT_HB_ENABLED | SSL_DTLSEXT_HB_DONT_SEND_REQUESTS); #endif #ifndef OPENSSL_NO_EC if (s->options & SSL_OP_SAFARI_ECDHE_ECDSA_BUG) ssl_check_for_safari(s, pkt); #endif /* !OPENSSL_NO_EC */ /* Clear any signature algorithms extension received */ OPENSSL_free(s->s3->tmp.peer_sigalgs); s->s3->tmp.peer_sigalgs = NULL; s->tlsext_use_etm = 0; #ifndef OPENSSL_NO_SRP OPENSSL_free(s->srp_ctx.login); s->srp_ctx.login = NULL; #endif s->srtp_profile = NULL; if (PACKET_remaining(pkt) == 0) goto ri_check; if (!PACKET_as_length_prefixed_2(pkt, &extensions)) return 0; if (!tls1_check_duplicate_extensions(&extensions)) return 0; /* * We parse all extensions to ensure the ClientHello is well-formed but, * unless an extension specifies otherwise, we ignore extensions upon * resumption. */ while (PACKET_get_net_2(&extensions, &type)) { PACKET extension; if (!PACKET_get_length_prefixed_2(&extensions, &extension)) return 0; if (s->tlsext_debug_cb) s->tlsext_debug_cb(s, 0, type, PACKET_data(&extension), PACKET_remaining(&extension), s->tlsext_debug_arg); if (type == TLSEXT_TYPE_renegotiate) { if (!ssl_parse_clienthello_renegotiate_ext(s, &extension, al)) return 0; renegotiate_seen = 1; } else if (s->version == SSL3_VERSION) { } /*- * The servername extension is treated as follows: * * - Only the hostname type is supported with a maximum length of 255. * - The servername is rejected if too long or if it contains zeros, * in which case an fatal alert is generated. * - The servername field is maintained together with the session cache. * - When a session is resumed, the servername call back invoked in order * to allow the application to position itself to the right context. * - The servername is acknowledged if it is new for a session or when * it is identical to a previously used for the same session. * Applications can control the behaviour. They can at any time * set a 'desirable' servername for a new SSL object. This can be the * case for example with HTTPS when a Host: header field is received and * a renegotiation is requested. In this case, a possible servername * presented in the new client hello is only acknowledged if it matches * the value of the Host: field. * - Applications must use SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION * if they provide for changing an explicit servername context for the * session, i.e. when the session has been established with a servername * extension. * - On session reconnect, the servername extension may be absent. * */ else if (type == TLSEXT_TYPE_server_name) { unsigned int servname_type; PACKET sni, hostname; if (!PACKET_as_length_prefixed_2(&extension, &sni) /* ServerNameList must be at least 1 byte long. */ || PACKET_remaining(&sni) == 0) { return 0; } /* * Although the server_name extension was intended to be * extensible to new name types, RFC 4366 defined the * syntax inextensibility and OpenSSL 1.0.x parses it as * such. 
* RFC 6066 corrected the mistake but adding new name types * is nevertheless no longer feasible, so act as if no other * SNI types can exist, to simplify parsing. * * Also note that the RFC permits only one SNI value per type, * i.e., we can only have a single hostname. */ if (!PACKET_get_1(&sni, &servname_type) || servname_type != TLSEXT_NAMETYPE_host_name || !PACKET_as_length_prefixed_2(&sni, &hostname)) { return 0; } if (!s->hit) { if (PACKET_remaining(&hostname) > TLSEXT_MAXLEN_host_name) { *al = TLS1_AD_UNRECOGNIZED_NAME; return 0; } if (PACKET_contains_zero_byte(&hostname)) { *al = TLS1_AD_UNRECOGNIZED_NAME; return 0; } if (!PACKET_strndup(&hostname, &s->session->tlsext_hostname)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } s->servername_done = 1; } else { /* * TODO(openssl-team): if the SNI doesn't match, we MUST * fall back to a full handshake. */ s->servername_done = s->session->tlsext_hostname && PACKET_equal(&hostname, s->session->tlsext_hostname, strlen(s->session->tlsext_hostname)); } } #ifndef OPENSSL_NO_SRP else if (type == TLSEXT_TYPE_srp) { PACKET srp_I; if (!PACKET_as_length_prefixed_1(&extension, &srp_I)) return 0; if (PACKET_contains_zero_byte(&srp_I)) return 0; /* * TODO(openssl-team): currently, we re-authenticate the user * upon resumption. Instead, we MUST ignore the login. */ if (!PACKET_strndup(&srp_I, &s->srp_ctx.login)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } #endif #ifndef OPENSSL_NO_EC else if (type == TLSEXT_TYPE_ec_point_formats) { PACKET ec_point_format_list; if (!PACKET_as_length_prefixed_1(&extension, &ec_point_format_list) || PACKET_remaining(&ec_point_format_list) == 0) { return 0; } if (!s->hit) { if (!PACKET_memdup(&ec_point_format_list, &s->session->tlsext_ecpointformatlist, &s-> session->tlsext_ecpointformatlist_length)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } } else if (type == TLSEXT_TYPE_elliptic_curves) { PACKET elliptic_curve_list; /* Each NamedCurve is 2 bytes and we must have at least 1. 
*/ if (!PACKET_as_length_prefixed_2(&extension, &elliptic_curve_list) || PACKET_remaining(&elliptic_curve_list) == 0 || (PACKET_remaining(&elliptic_curve_list) % 2) != 0) { return 0; } if (!s->hit) { if (!PACKET_memdup(&elliptic_curve_list, &s->session->tlsext_ellipticcurvelist, &s-> session->tlsext_ellipticcurvelist_length)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } } #endif /* OPENSSL_NO_EC */ else if (type == TLSEXT_TYPE_session_ticket) { if (s->tls_session_ticket_ext_cb && !s->tls_session_ticket_ext_cb(s, PACKET_data(&extension), PACKET_remaining(&extension), s->tls_session_ticket_ext_cb_arg)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } } else if (type == TLSEXT_TYPE_signature_algorithms) { PACKET supported_sig_algs; if (!PACKET_as_length_prefixed_2(&extension, &supported_sig_algs) || (PACKET_remaining(&supported_sig_algs) % 2) != 0 || PACKET_remaining(&supported_sig_algs) == 0) { return 0; } if (!s->hit) { if (!tls1_save_sigalgs(s, PACKET_data(&supported_sig_algs), PACKET_remaining(&supported_sig_algs))) { return 0; } } } else if (type == TLSEXT_TYPE_status_request) { if (!PACKET_get_1(&extension, (unsigned int *)&s->tlsext_status_type)) { return 0; } #ifndef OPENSSL_NO_OCSP if (s->tlsext_status_type == TLSEXT_STATUSTYPE_ocsp) { const unsigned char *ext_data; PACKET responder_id_list, exts; if (!PACKET_get_length_prefixed_2 (&extension, &responder_id_list)) return 0; /* * We remove any OCSP_RESPIDs from a previous handshake * to prevent unbounded memory growth - CVE-2016-6304 */ sk_OCSP_RESPID_pop_free(s->tlsext_ocsp_ids, OCSP_RESPID_free); if (PACKET_remaining(&responder_id_list) > 0) { s->tlsext_ocsp_ids = sk_OCSP_RESPID_new_null(); if (s->tlsext_ocsp_ids == NULL) { *al = SSL_AD_INTERNAL_ERROR; return 0; } } else { s->tlsext_ocsp_ids = NULL; } while (PACKET_remaining(&responder_id_list) > 0) { OCSP_RESPID *id; PACKET responder_id; const unsigned char *id_data; if (!PACKET_get_length_prefixed_2(&responder_id_list, &responder_id) || PACKET_remaining(&responder_id) == 0) { return 0; } id_data = PACKET_data(&responder_id); id = d2i_OCSP_RESPID(NULL, &id_data, PACKET_remaining(&responder_id)); if (id == NULL) return 0; if (id_data != PACKET_end(&responder_id)) { OCSP_RESPID_free(id); return 0; } if (!sk_OCSP_RESPID_push(s->tlsext_ocsp_ids, id)) { OCSP_RESPID_free(id); *al = SSL_AD_INTERNAL_ERROR; return 0; } } /* Read in request_extensions */ if (!PACKET_as_length_prefixed_2(&extension, &exts)) return 0; if (PACKET_remaining(&exts) > 0) { ext_data = PACKET_data(&exts); sk_X509_EXTENSION_pop_free(s->tlsext_ocsp_exts, X509_EXTENSION_free); s->tlsext_ocsp_exts = d2i_X509_EXTENSIONS(NULL, &ext_data, PACKET_remaining(&exts)); if (s->tlsext_ocsp_exts == NULL || ext_data != PACKET_end(&exts)) { return 0; } } } else #endif { /* * We don't know what to do with any other type so ignore it. 
*/ s->tlsext_status_type = -1; } } #ifndef OPENSSL_NO_HEARTBEATS else if (SSL_IS_DTLS(s) && type == TLSEXT_TYPE_heartbeat) { unsigned int hbtype; if (!PACKET_get_1(&extension, &hbtype) || PACKET_remaining(&extension)) { *al = SSL_AD_DECODE_ERROR; return 0; } switch (hbtype) { case 0x01: /* Client allows us to send HB requests */ s->tlsext_heartbeat |= SSL_DTLSEXT_HB_ENABLED; break; case 0x02: /* Client doesn't accept HB requests */ s->tlsext_heartbeat |= SSL_DTLSEXT_HB_ENABLED; s->tlsext_heartbeat |= SSL_DTLSEXT_HB_DONT_SEND_REQUESTS; break; default: *al = SSL_AD_ILLEGAL_PARAMETER; return 0; } } #endif #ifndef OPENSSL_NO_NEXTPROTONEG else if (type == TLSEXT_TYPE_next_proto_neg && s->s3->tmp.finish_md_len == 0) { /*- * We shouldn't accept this extension on a * renegotiation. * * s->new_session will be set on renegotiation, but we * probably shouldn't rely that it couldn't be set on * the initial renegotiation too in certain cases (when * there's some other reason to disallow resuming an * earlier session -- the current code won't be doing * anything like that, but this might change). * * A valid sign that there's been a previous handshake * in this connection is if s->s3->tmp.finish_md_len > * 0. (We are talking about a check that will happen * in the Hello protocol round, well before a new * Finished message could have been computed.) */ s->s3->next_proto_neg_seen = 1; } #endif else if (type == TLSEXT_TYPE_application_layer_protocol_negotiation && s->s3->tmp.finish_md_len == 0) { if (!tls1_alpn_handle_client_hello(s, &extension, al)) return 0; } /* session ticket processed earlier */ #ifndef OPENSSL_NO_SRTP else if (SSL_IS_DTLS(s) && SSL_get_srtp_profiles(s) && type == TLSEXT_TYPE_use_srtp) { if (ssl_parse_clienthello_use_srtp_ext(s, &extension, al)) return 0; } #endif else if (type == TLSEXT_TYPE_encrypt_then_mac) s->tlsext_use_etm = 1; /* * Note: extended master secret extension handled in * tls_check_serverhello_tlsext_early() */ /* * If this ClientHello extension was unhandled and this is a * nonresumed connection, check whether the extension is a custom * TLS Extension (has a custom_srv_ext_record), and if so call the * callback and record the extension number so that an appropriate * ServerHello may be later returned. */ else if (!s->hit) { if (custom_ext_parse(s, 1, type, PACKET_data(&extension), PACKET_remaining(&extension), al) <= 0) return 0; } } if (PACKET_remaining(pkt) != 0) { /* * tls1_check_duplicate_extensions should ensure this never happens. */ *al = SSL_AD_INTERNAL_ERROR; return 0; } ri_check: /* Need RI if renegotiating */ if (!renegotiate_seen && s->renegotiate && !(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) { *al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL_SCAN_CLIENTHELLO_TLSEXT, SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED); return 0; } /* * This function currently has no state to clean up, so it returns directly. * If parsing fails at any point, the function returns early. * The SSL object may be left with partial data from extensions, but it must * then no longer be used, and clearing it up will free the leftovers. */ return 1; }
168,428
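The commit message in the record above deals with deferring the negotiated encrypt-then-MAC setting until ChangeCipherSpec processing, with separate flags for the read and write directions. Below is a minimal stand-alone sketch of that "negotiate now, apply at the commit point" pattern; all names are invented for illustration and this is not OpenSSL's actual code.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical connection state: the value negotiated in the extension is
 * only *proposed*; it becomes active per direction when that direction's
 * ChangeCipherSpec is processed. */
struct conn {
    bool etm_negotiated;   /* set while parsing the ClientHello extension */
    bool etm_read_active;  /* used by the record layer for incoming records */
    bool etm_write_active; /* used by the record layer for outgoing records */
};

static void parse_etm_extension(struct conn *c, bool peer_offered_etm)
{
    /* Only record the proposal; do NOT touch the flags the record layer
     * reads, otherwise records protected under the old cipher state would
     * be processed with the new setting. */
    c->etm_negotiated = peer_offered_etm;
}

static void on_ccs_received(struct conn *c)
{
    /* Incoming direction switches to the new cipher state here. */
    c->etm_read_active = c->etm_negotiated;
}

static void on_ccs_sent(struct conn *c)
{
    /* Outgoing direction switches independently. */
    c->etm_write_active = c->etm_negotiated;
}

int main(void)
{
    struct conn c = {0};
    parse_etm_extension(&c, true);
    printf("after hello: read=%d write=%d\n", c.etm_read_active, c.etm_write_active);
    on_ccs_received(&c);
    on_ccs_sent(&c);
    printf("after CCS:   read=%d write=%d\n", c.etm_read_active, c.etm_write_active);
    return 0;
}
```

Keeping the proposed value apart from the active per-direction flags is what prevents records protected under the old cipher state from being handled with the new setting in the middle of a renegotiation.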
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Frame* ReuseExistingWindow(LocalFrame& active_frame, LocalFrame& lookup_frame, const AtomicString& frame_name, NavigationPolicy policy, const KURL& destination_url) { if (!frame_name.IsEmpty() && !EqualIgnoringASCIICase(frame_name, "_blank") && policy == kNavigationPolicyIgnore) { if (Frame* frame = lookup_frame.FindFrameForNavigation( frame_name, active_frame, destination_url)) { if (!EqualIgnoringASCIICase(frame_name, "_self")) { if (Page* page = frame->GetPage()) { if (page == active_frame.GetPage()) page->GetFocusController().SetFocusedFrame(frame); else page->GetChromeClient().Focus(); } } return frame; } } return nullptr; } Commit Message: If a page calls |window.focus()|, kick it out of fullscreen. BUG=776418, 800056 Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017 Reviewed-on: https://chromium-review.googlesource.com/852378 Reviewed-by: Nasko Oskov <[email protected]> Reviewed-by: Philip Jägenstedt <[email protected]> Commit-Queue: Avi Drissman <[email protected]> Cr-Commit-Position: refs/heads/master@{#533790} CWE ID:
static Frame* ReuseExistingWindow(LocalFrame& active_frame, LocalFrame& lookup_frame, const AtomicString& frame_name, NavigationPolicy policy, const KURL& destination_url) { if (!frame_name.IsEmpty() && !EqualIgnoringASCIICase(frame_name, "_blank") && policy == kNavigationPolicyIgnore) { if (Frame* frame = lookup_frame.FindFrameForNavigation( frame_name, active_frame, destination_url)) { if (!EqualIgnoringASCIICase(frame_name, "_self")) { if (Page* page = frame->GetPage()) { if (page == active_frame.GetPage()) page->GetFocusController().SetFocusedFrame(frame); else page->GetChromeClient().Focus(&active_frame); } } return frame; } } return nullptr; }
172,725
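The fix in the record above passes the initiating frame into Focus() so the embedder can apply policy (leaving fullscreen) instead of focusing unconditionally. Below is a rough C sketch of the same "hand the privileged UI action its initiator" idea; the types and behavior are invented, not Chromium's code.

```c
#include <stdbool.h>
#include <stdio.h>

struct frame {
    const char *name;
    bool fullscreen;
};

/* Before the fix the equivalent call took no initiator, so the embedder
 * could not tell that a fullscreen page was trying to refocus itself. */
static void focus_window(struct frame *initiator)
{
    if (initiator && initiator->fullscreen) {
        initiator->fullscreen = false;   /* policy: focusing exits fullscreen */
        printf("%s: left fullscreen before focusing\n", initiator->name);
    }
    printf("window focused\n");
}

int main(void)
{
    struct frame f = {"page", true};
    focus_window(&f);
    return 0;
}
```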
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bit_in(PG_FUNCTION_ARGS) { char *input_string = PG_GETARG_CSTRING(0); #ifdef NOT_USED Oid typelem = PG_GETARG_OID(1); #endif int32 atttypmod = PG_GETARG_INT32(2); VarBit *result; /* The resulting bit string */ char *sp; /* pointer into the character string */ bits8 *r; /* pointer into the result */ int len, /* Length of the whole data structure */ bitlen, /* Number of bits in the bit string */ slen; /* Length of the input string */ bool bit_not_hex; /* false = hex string true = bit string */ int bc; bits8 x = 0; /* Check that the first character is a b or an x */ if (input_string[0] == 'b' || input_string[0] == 'B') { bit_not_hex = true; sp = input_string + 1; } else if (input_string[0] == 'x' || input_string[0] == 'X') { bit_not_hex = false; sp = input_string + 1; } else { /* * Otherwise it's binary. This allows things like cast('1001' as bit) * to work transparently. */ bit_not_hex = true; sp = input_string; } slen = strlen(sp); /* Determine bitlength from input string */ if (bit_not_hex) bitlen = slen; else bitlen = slen * 4; /* * Sometimes atttypmod is not supplied. If it is supplied we need to make * sure that the bitstring fits. */ if (atttypmod <= 0) atttypmod = bitlen; else if (bitlen != atttypmod) ereport(ERROR, (errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH), errmsg("bit string length %d does not match type bit(%d)", bitlen, atttypmod))); len = VARBITTOTALLEN(atttypmod); /* set to 0 so that *r is always initialised and string is zero-padded */ result = (VarBit *) palloc0(len); SET_VARSIZE(result, len); VARBITLEN(result) = atttypmod; r = VARBITS(result); if (bit_not_hex) { /* Parse the bit representation of the string */ /* We know it fits, as bitlen was compared to atttypmod */ x = HIGHBIT; for (; *sp; sp++) { if (*sp == '1') *r |= x; else if (*sp != '0') ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("\"%c\" is not a valid binary digit", *sp))); x >>= 1; if (x == 0) { x = HIGHBIT; r++; } } } else { /* Parse the hex representation of the string */ for (bc = 0; *sp; sp++) { if (*sp >= '0' && *sp <= '9') x = (bits8) (*sp - '0'); else if (*sp >= 'A' && *sp <= 'F') x = (bits8) (*sp - 'A') + 10; else if (*sp >= 'a' && *sp <= 'f') x = (bits8) (*sp - 'a') + 10; else ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("\"%c\" is not a valid hexadecimal digit", *sp))); if (bc) { *r++ |= x; bc = 0; } else { *r = x << 4; bc = 1; } } } PG_RETURN_VARBIT_P(result); } Commit Message: Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064 CWE ID: CWE-189
bit_in(PG_FUNCTION_ARGS) { char *input_string = PG_GETARG_CSTRING(0); #ifdef NOT_USED Oid typelem = PG_GETARG_OID(1); #endif int32 atttypmod = PG_GETARG_INT32(2); VarBit *result; /* The resulting bit string */ char *sp; /* pointer into the character string */ bits8 *r; /* pointer into the result */ int len, /* Length of the whole data structure */ bitlen, /* Number of bits in the bit string */ slen; /* Length of the input string */ bool bit_not_hex; /* false = hex string true = bit string */ int bc; bits8 x = 0; /* Check that the first character is a b or an x */ if (input_string[0] == 'b' || input_string[0] == 'B') { bit_not_hex = true; sp = input_string + 1; } else if (input_string[0] == 'x' || input_string[0] == 'X') { bit_not_hex = false; sp = input_string + 1; } else { /* * Otherwise it's binary. This allows things like cast('1001' as bit) * to work transparently. */ bit_not_hex = true; sp = input_string; } /* * Determine bitlength from input string. MaxAllocSize ensures a regular * input is small enough, but we must check hex input. */ slen = strlen(sp); if (bit_not_hex) bitlen = slen; else { if (slen > VARBITMAXLEN / 4) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("bit string length exceeds the maximum allowed (%d)", VARBITMAXLEN))); bitlen = slen * 4; } /* * Sometimes atttypmod is not supplied. If it is supplied we need to make * sure that the bitstring fits. */ if (atttypmod <= 0) atttypmod = bitlen; else if (bitlen != atttypmod) ereport(ERROR, (errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH), errmsg("bit string length %d does not match type bit(%d)", bitlen, atttypmod))); len = VARBITTOTALLEN(atttypmod); /* set to 0 so that *r is always initialised and string is zero-padded */ result = (VarBit *) palloc0(len); SET_VARSIZE(result, len); VARBITLEN(result) = atttypmod; r = VARBITS(result); if (bit_not_hex) { /* Parse the bit representation of the string */ /* We know it fits, as bitlen was compared to atttypmod */ x = HIGHBIT; for (; *sp; sp++) { if (*sp == '1') *r |= x; else if (*sp != '0') ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("\"%c\" is not a valid binary digit", *sp))); x >>= 1; if (x == 0) { x = HIGHBIT; r++; } } } else { /* Parse the hex representation of the string */ for (bc = 0; *sp; sp++) { if (*sp >= '0' && *sp <= '9') x = (bits8) (*sp - '0'); else if (*sp >= 'A' && *sp <= 'F') x = (bits8) (*sp - 'A') + 10; else if (*sp >= 'a' && *sp <= 'f') x = (bits8) (*sp - 'a') + 10; else ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("\"%c\" is not a valid hexadecimal digit", *sp))); if (bc) { *r++ |= x; bc = 0; } else { *r = x << 4; bc = 1; } } } PG_RETURN_VARBIT_P(result); }
166,418
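The PostgreSQL fix above validates the hex-digit count before computing slen * 4, so the allocation size can never come from a wrapped multiplication. Below is a self-contained sketch of that check-the-inputs-before-multiplying pattern; MAX_BITS and the function name are made up for illustration, not PostgreSQL's macros.

```c
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_BITS (INT_MAX / 2) /* hypothetical upper bound on the bit length */

/* Returns a buffer sized from a hex-digit count, or NULL if the size
 * computation would overflow. The check runs on the input, never on the
 * already-wrapped product. */
static unsigned char *alloc_for_hex_digits(size_t ndigits)
{
    if (ndigits > (size_t)MAX_BITS / 4) {
        fprintf(stderr, "bit string length exceeds the maximum allowed\n");
        return NULL;
    }
    size_t bitlen = ndigits * 4;          /* cannot overflow now */
    size_t nbytes = (bitlen + 7) / 8;
    return calloc(nbytes ? nbytes : 1, 1);
}

int main(void)
{
    unsigned char *ok = alloc_for_hex_digits(16);
    unsigned char *bad = alloc_for_hex_digits((size_t)-1);
    printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
    free(ok);
    free(bad);
    return 0;
}
```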
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void setRewriteURLFolder(const char* folder) { m_rewriteFolder = folder; } Commit Message: Revert 162155 "This review merges the two existing page serializ..." Change r162155 broke the world even though it was landed using the CQ. > This review merges the two existing page serializers, WebPageSerializerImpl and > PageSerializer, into one, PageSerializer. In addition to this it moves all > the old tests from WebPageNewSerializerTest and WebPageSerializerTest to the > PageSerializerTest structure and splits out one test for MHTML into a new > MHTMLTest file. > > Saving as 'Webpage, Complete', 'Webpage, HTML Only' and as MHTML when the > 'Save Page as MHTML' flag is enabled now uses the same code, and should thus > have the same feature set. Meaning that both modes now should be a bit better. > > Detailed list of changes: > > - PageSerializerTest: Prepare for more DTD test > - PageSerializerTest: Remove now unneccesary input image test > - PageSerializerTest: Remove unused WebPageSerializer/Impl code > - PageSerializerTest: Move data URI morph test > - PageSerializerTest: Move data URI test > - PageSerializerTest: Move namespace test > - PageSerializerTest: Move SVG Image test > - MHTMLTest: Move MHTML specific test to own test file > - PageSerializerTest: Delete duplicate XML header test > - PageSerializerTest: Move blank frame test > - PageSerializerTest: Move CSS test > - PageSerializerTest: Add frameset/frame test > - PageSerializerTest: Move old iframe test > - PageSerializerTest: Move old elements test > - Use PageSerizer for saving web pages > - PageSerializerTest: Test for rewriting links > - PageSerializer: Add rewrite link accumulator > - PageSerializer: Serialize images in iframes/frames src > - PageSerializer: XHTML fix for meta tags > - PageSerializer: Add presentation CSS > - PageSerializer: Rename out parameter > > BUG= > [email protected] > > Review URL: https://codereview.chromium.org/68613003 [email protected] Review URL: https://codereview.chromium.org/73673003 git-svn-id: svn://svn.chromium.org/blink/trunk@162156 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
void setRewriteURLFolder(const char* folder)
171,577
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; register ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = 1.0f / MaxF(w.x,MaxF(w.y,w.z)); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } Commit Message: Added extra EOF check and some minor refactoring. CWE ID: CWE-20
static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; register ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z)); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); }
168,898
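The DDS routine in the record above finds the dominant eigenvector of a 3x3 covariance matrix by power iteration, rescaling by the largest component each step. Below is a compact generic-C version of the same algorithm (not ImageMagick's code) that makes the unrolled vector arithmetic easier to follow.

```c
#include <math.h>
#include <stdio.h>

/* Power iteration on a symmetric 3x3 matrix stored as its six unique
 * entries {c00, c01, c02, c11, c12, c22}, mirroring the covariance layout
 * used above. Eight iterations is plenty for the DXT fitting use case. */
static void principal_axis(const float cov[6], float out[3])
{
    float v[3] = {1.0f, 1.0f, 1.0f};
    for (int it = 0; it < 8; it++) {
        float w[3] = {
            cov[0] * v[0] + cov[1] * v[1] + cov[2] * v[2],
            cov[1] * v[0] + cov[3] * v[1] + cov[4] * v[2],
            cov[2] * v[0] + cov[4] * v[1] + cov[5] * v[2],
        };
        float m = fmaxf(w[0], fmaxf(w[1], w[2]));
        if (m == 0.0f)          /* degenerate covariance: keep direction */
            break;
        for (int k = 0; k < 3; k++)
            v[k] = w[k] / m;    /* rescale so the iteration stays finite */
    }
    for (int k = 0; k < 3; k++)
        out[k] = v[k];
}

int main(void)
{
    /* covariance with a clearly dominant x direction */
    const float cov[6] = {4.0f, 0.5f, 0.0f, 1.0f, 0.0f, 0.5f};
    float axis[3];
    principal_axis(cov, axis);
    printf("principal axis ~ (%f, %f, %f)\n", axis[0], axis[1], axis[2]);
    return 0;
}
```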
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->private; pte_t *pte, ptent; spinlock_t *ptl; struct page *page; split_huge_page_pmd(walk->mm, pmd); pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = *pte; if (!pte_present(ptent)) continue; page = vm_normal_page(vma, addr, ptent); if (!page) continue; if (PageReserved(page)) continue; /* Clear accessed and referenced bits. */ ptep_test_and_clear_young(vma, addr, pte); ClearPageReferenced(page); } pte_unmap_unlock(pte - 1, ptl); cond_resched(); return 0; } Commit Message: mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream. In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). 
if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... 
page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. ====== end quote ======= [[email protected]: checkpatch fixes] Reported-by: Ulrich Obergfell <[email protected]> Signed-off-by: Andrea Arcangeli <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Dave Jones <[email protected]> Acked-by: Larry Woodman <[email protected]> Acked-by: Rik van Riel <[email protected]> Cc: Mark Salter <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]> CWE ID: CWE-264
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->private; pte_t *pte, ptent; spinlock_t *ptl; struct page *page; split_huge_page_pmd(walk->mm, pmd); if (pmd_trans_unstable(pmd)) return 0; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = *pte; if (!pte_present(ptent)) continue; page = vm_normal_page(vma, addr, ptent); if (!page) continue; if (PageReserved(page)) continue; /* Clear accessed and referenced bits. */ ptep_test_and_clear_young(vma, addr, pte); ClearPageReferenced(page); } pte_unmap_unlock(pte - 1, ptl); cond_resched(); return 0; }
165,627
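The kernel commit above fixes the race by snapshotting the shared pmd into a local variable, behind a compiler barrier, and making every later check use that local copy. Below is a user-space sketch of the same snapshot idea; READ_SNAPSHOT and the flag bits are invented for illustration, not the kernel's helpers.

```c
#include <stdint.h>
#include <stdio.h>

/* Read a value that another thread may rewrite concurrently exactly once,
 * then make every subsequent decision on the local copy. The volatile cast
 * acts as the compiler barrier: the compiler may not re-load the shared
 * variable between the two checks. */
#define READ_SNAPSHOT(p) (*(volatile uint64_t *)(p))

#define FLAG_PRESENT 0x1u
#define FLAG_HUGE    0x2u

static uint64_t shared_entry; /* updated by a concurrent "fault" path */

static int classify_entry(void)
{
    uint64_t val = READ_SNAPSHOT(&shared_entry);

    /* Both tests use 'val'; even if shared_entry changes under us, we can
     * never conclude "not huge" and then trip over a huge entry later. */
    if (!(val & FLAG_PRESENT))
        return 0;               /* nothing mapped: skip */
    if (val & FLAG_HUGE)
        return 1;               /* handle the huge case */
    return 2;                   /* normal case */
}

int main(void)
{
    shared_entry = FLAG_PRESENT | FLAG_HUGE;
    printf("class=%d\n", classify_entry());
    shared_entry = FLAG_PRESENT;
    printf("class=%d\n", classify_entry());
    return 0;
}
```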
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: icmp6_rrenum_print(netdissect_options *ndo, const u_char *bp, const u_char *ep) { const struct icmp6_router_renum *rr6; const char *cp; const struct rr_pco_match *match; const struct rr_pco_use *use; char hbuf[NI_MAXHOST]; int n; if (ep < bp) return; rr6 = (const struct icmp6_router_renum *)bp; cp = (const char *)(rr6 + 1); ND_TCHECK(rr6->rr_reserved); switch (rr6->rr_code) { case ICMP6_ROUTER_RENUMBERING_COMMAND: ND_PRINT((ndo,"router renum: command")); break; case ICMP6_ROUTER_RENUMBERING_RESULT: ND_PRINT((ndo,"router renum: result")); break; case ICMP6_ROUTER_RENUMBERING_SEQNUM_RESET: ND_PRINT((ndo,"router renum: sequence number reset")); break; default: ND_PRINT((ndo,"router renum: code-#%d", rr6->rr_code)); break; } ND_PRINT((ndo,", seq=%u", EXTRACT_32BITS(&rr6->rr_seqnum))); if (ndo->ndo_vflag) { #define F(x, y) ((rr6->rr_flags) & (x) ? (y) : "") ND_PRINT((ndo,"[")); /*]*/ if (rr6->rr_flags) { ND_PRINT((ndo,"%s%s%s%s%s,", F(ICMP6_RR_FLAGS_TEST, "T"), F(ICMP6_RR_FLAGS_REQRESULT, "R"), F(ICMP6_RR_FLAGS_FORCEAPPLY, "A"), F(ICMP6_RR_FLAGS_SPECSITE, "S"), F(ICMP6_RR_FLAGS_PREVDONE, "P"))); } ND_PRINT((ndo,"seg=%u,", rr6->rr_segnum)); ND_PRINT((ndo,"maxdelay=%u", EXTRACT_16BITS(&rr6->rr_maxdelay))); if (rr6->rr_reserved) ND_PRINT((ndo,"rsvd=0x%x", EXTRACT_32BITS(&rr6->rr_reserved))); /*[*/ ND_PRINT((ndo,"]")); #undef F } if (rr6->rr_code == ICMP6_ROUTER_RENUMBERING_COMMAND) { match = (const struct rr_pco_match *)cp; cp = (const char *)(match + 1); ND_TCHECK(match->rpm_prefix); if (ndo->ndo_vflag > 1) ND_PRINT((ndo,"\n\t")); else ND_PRINT((ndo," ")); ND_PRINT((ndo,"match(")); /*)*/ switch (match->rpm_code) { case RPM_PCO_ADD: ND_PRINT((ndo,"add")); break; case RPM_PCO_CHANGE: ND_PRINT((ndo,"change")); break; case RPM_PCO_SETGLOBAL: ND_PRINT((ndo,"setglobal")); break; default: ND_PRINT((ndo,"#%u", match->rpm_code)); break; } if (ndo->ndo_vflag) { ND_PRINT((ndo,",ord=%u", match->rpm_ordinal)); ND_PRINT((ndo,",min=%u", match->rpm_minlen)); ND_PRINT((ndo,",max=%u", match->rpm_maxlen)); } if (addrtostr6(&match->rpm_prefix, hbuf, sizeof(hbuf))) ND_PRINT((ndo,",%s/%u", hbuf, match->rpm_matchlen)); else ND_PRINT((ndo,",?/%u", match->rpm_matchlen)); /*(*/ ND_PRINT((ndo,")")); n = match->rpm_len - 3; if (n % 4) goto trunc; n /= 4; while (n-- > 0) { use = (const struct rr_pco_use *)cp; cp = (const char *)(use + 1); ND_TCHECK(use->rpu_prefix); if (ndo->ndo_vflag > 1) ND_PRINT((ndo,"\n\t")); else ND_PRINT((ndo," ")); ND_PRINT((ndo,"use(")); /*)*/ if (use->rpu_flags) { #define F(x, y) ((use->rpu_flags) & (x) ? (y) : "") ND_PRINT((ndo,"%s%s,", F(ICMP6_RR_PCOUSE_FLAGS_DECRVLTIME, "V"), F(ICMP6_RR_PCOUSE_FLAGS_DECRPLTIME, "P"))); #undef F } if (ndo->ndo_vflag) { ND_PRINT((ndo,"mask=0x%x,", use->rpu_ramask)); ND_PRINT((ndo,"raflags=0x%x,", use->rpu_raflags)); if (~use->rpu_vltime == 0) ND_PRINT((ndo,"vltime=infty,")); else ND_PRINT((ndo,"vltime=%u,", EXTRACT_32BITS(&use->rpu_vltime))); if (~use->rpu_pltime == 0) ND_PRINT((ndo,"pltime=infty,")); else ND_PRINT((ndo,"pltime=%u,", EXTRACT_32BITS(&use->rpu_pltime))); } if (addrtostr6(&use->rpu_prefix, hbuf, sizeof(hbuf))) ND_PRINT((ndo,"%s/%u/%u", hbuf, use->rpu_uselen, use->rpu_keeplen)); else ND_PRINT((ndo,"?/%u/%u", use->rpu_uselen, use->rpu_keeplen)); /*(*/ ND_PRINT((ndo,")")); } } return; trunc: ND_PRINT((ndo,"[|icmp6]")); } Commit Message: (for 4.9.3) CVE-2018-14882/ICMP6 RPL: Add a missing bounds check Moreover: Add and use *_tstr[] strings. Update four tests outputs accordingly. Fix a space. 
Wang Junjie of 360 ESG Codesafe Team had independently identified this vulnerability in 2018 by means of fuzzing and provided the packet capture file for the test. CWE ID: CWE-125
icmp6_rrenum_print(netdissect_options *ndo, const u_char *bp, const u_char *ep) { const struct icmp6_router_renum *rr6; const char *cp; const struct rr_pco_match *match; const struct rr_pco_use *use; char hbuf[NI_MAXHOST]; int n; if (ep < bp) return; rr6 = (const struct icmp6_router_renum *)bp; cp = (const char *)(rr6 + 1); ND_TCHECK(rr6->rr_reserved); switch (rr6->rr_code) { case ICMP6_ROUTER_RENUMBERING_COMMAND: ND_PRINT((ndo,"router renum: command")); break; case ICMP6_ROUTER_RENUMBERING_RESULT: ND_PRINT((ndo,"router renum: result")); break; case ICMP6_ROUTER_RENUMBERING_SEQNUM_RESET: ND_PRINT((ndo,"router renum: sequence number reset")); break; default: ND_PRINT((ndo,"router renum: code-#%d", rr6->rr_code)); break; } ND_PRINT((ndo,", seq=%u", EXTRACT_32BITS(&rr6->rr_seqnum))); if (ndo->ndo_vflag) { #define F(x, y) ((rr6->rr_flags) & (x) ? (y) : "") ND_PRINT((ndo,"[")); /*]*/ if (rr6->rr_flags) { ND_PRINT((ndo,"%s%s%s%s%s,", F(ICMP6_RR_FLAGS_TEST, "T"), F(ICMP6_RR_FLAGS_REQRESULT, "R"), F(ICMP6_RR_FLAGS_FORCEAPPLY, "A"), F(ICMP6_RR_FLAGS_SPECSITE, "S"), F(ICMP6_RR_FLAGS_PREVDONE, "P"))); } ND_PRINT((ndo,"seg=%u,", rr6->rr_segnum)); ND_PRINT((ndo,"maxdelay=%u", EXTRACT_16BITS(&rr6->rr_maxdelay))); if (rr6->rr_reserved) ND_PRINT((ndo,"rsvd=0x%x", EXTRACT_32BITS(&rr6->rr_reserved))); /*[*/ ND_PRINT((ndo,"]")); #undef F } if (rr6->rr_code == ICMP6_ROUTER_RENUMBERING_COMMAND) { match = (const struct rr_pco_match *)cp; cp = (const char *)(match + 1); ND_TCHECK(match->rpm_prefix); if (ndo->ndo_vflag > 1) ND_PRINT((ndo,"\n\t")); else ND_PRINT((ndo," ")); ND_PRINT((ndo,"match(")); /*)*/ switch (match->rpm_code) { case RPM_PCO_ADD: ND_PRINT((ndo,"add")); break; case RPM_PCO_CHANGE: ND_PRINT((ndo,"change")); break; case RPM_PCO_SETGLOBAL: ND_PRINT((ndo,"setglobal")); break; default: ND_PRINT((ndo,"#%u", match->rpm_code)); break; } if (ndo->ndo_vflag) { ND_PRINT((ndo,",ord=%u", match->rpm_ordinal)); ND_PRINT((ndo,",min=%u", match->rpm_minlen)); ND_PRINT((ndo,",max=%u", match->rpm_maxlen)); } if (addrtostr6(&match->rpm_prefix, hbuf, sizeof(hbuf))) ND_PRINT((ndo,",%s/%u", hbuf, match->rpm_matchlen)); else ND_PRINT((ndo,",?/%u", match->rpm_matchlen)); /*(*/ ND_PRINT((ndo,")")); n = match->rpm_len - 3; if (n % 4) goto trunc; n /= 4; while (n-- > 0) { use = (const struct rr_pco_use *)cp; cp = (const char *)(use + 1); ND_TCHECK(use->rpu_prefix); if (ndo->ndo_vflag > 1) ND_PRINT((ndo,"\n\t")); else ND_PRINT((ndo," ")); ND_PRINT((ndo,"use(")); /*)*/ if (use->rpu_flags) { #define F(x, y) ((use->rpu_flags) & (x) ? (y) : "") ND_PRINT((ndo,"%s%s,", F(ICMP6_RR_PCOUSE_FLAGS_DECRVLTIME, "V"), F(ICMP6_RR_PCOUSE_FLAGS_DECRPLTIME, "P"))); #undef F } if (ndo->ndo_vflag) { ND_PRINT((ndo,"mask=0x%x,", use->rpu_ramask)); ND_PRINT((ndo,"raflags=0x%x,", use->rpu_raflags)); if (~use->rpu_vltime == 0) ND_PRINT((ndo,"vltime=infty,")); else ND_PRINT((ndo,"vltime=%u,", EXTRACT_32BITS(&use->rpu_vltime))); if (~use->rpu_pltime == 0) ND_PRINT((ndo,"pltime=infty,")); else ND_PRINT((ndo,"pltime=%u,", EXTRACT_32BITS(&use->rpu_pltime))); } if (addrtostr6(&use->rpu_prefix, hbuf, sizeof(hbuf))) ND_PRINT((ndo,"%s/%u/%u", hbuf, use->rpu_uselen, use->rpu_keeplen)); else ND_PRINT((ndo,"?/%u/%u", use->rpu_uselen, use->rpu_keeplen)); /*(*/ ND_PRINT((ndo,")")); } } return; trunc: ND_PRINT((ndo, "%s", icmp6_tstr)); }
169,825
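The tcpdump fix above relies on remaining-length checks before every field access. Below is a stand-alone sketch of that cursor-plus-remaining pattern for parsing untrusted packet bytes; the struct and helpers are illustrative, not tcpdump's ND_TCHECK machinery.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cursor {
    const uint8_t *p;
    size_t remaining;
};

/* Every read checks 'remaining' first, so a truncated packet can only make
 * the parser stop early, never read past the buffer. */
static int get_u8(struct cursor *c, uint8_t *out)
{
    if (c->remaining < 1)
        return -1;
    *out = c->p[0];
    c->p += 1;
    c->remaining -= 1;
    return 0;
}

static int get_be16(struct cursor *c, uint16_t *out)
{
    if (c->remaining < 2)
        return -1;
    *out = (uint16_t)((c->p[0] << 8) | c->p[1]);
    c->p += 2;
    c->remaining -= 2;
    return 0;
}

int main(void)
{
    const uint8_t pkt[] = {0x05, 0x01};   /* deliberately truncated */
    struct cursor c = {pkt, sizeof(pkt)};
    uint8_t type;
    uint16_t len;
    if (get_u8(&c, &type) < 0 || get_be16(&c, &len) < 0) {
        puts("[|truncated]");
        return 0;
    }
    printf("type=%u len=%u\n", type, len);
    return 0;
}
```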
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 perm_addr[MAX_ADDR_LEN]; if (!netdev->dcbnl_ops->getpermhwaddr) return -EOPNOTSUPP; netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); } Commit Message: dcbnl: fix various netlink info leaks The dcb netlink interface leaks stack memory in various places: * perm_addr[] buffer is only filled at max with 12 of the 32 bytes but copied completely, * no in-kernel driver fills all fields of an IEEE 802.1Qaz subcommand, so we're leaking up to 58 bytes for ieee_ets structs, up to 136 bytes for ieee_pfc structs, etc., * the same is true for CEE -- no in-kernel driver fills the whole struct, Prevent all of the above stack info leaks by properly initializing the buffers/structures involved. Signed-off-by: Mathias Krause <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-399
static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 perm_addr[MAX_ADDR_LEN]; if (!netdev->dcbnl_ops->getpermhwaddr) return -EOPNOTSUPP; memset(perm_addr, 0, sizeof(perm_addr)); netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); }
166,058
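The dcbnl fix above zero-fills perm_addr[] because the driver callback may write fewer bytes than are later copied out, leaking stack memory. Below is the same defensive pattern in a tiny stand-alone form, with a hypothetical callback in place of the driver hook.

```c
#include <stdio.h>
#include <string.h>

#define MAX_ADDR_LEN 32

/* Hypothetical driver hook that only fills the first 6 bytes. */
static void get_hw_addr(unsigned char *addr)
{
    const unsigned char mac[6] = {0x02, 0x00, 0x5e, 0x10, 0x00, 0x01};
    memcpy(addr, mac, sizeof(mac));
}

static void reply_with_addr(void)
{
    unsigned char perm_addr[MAX_ADDR_LEN];

    /* Zero the whole buffer first: all MAX_ADDR_LEN bytes are copied to
     * the requester below, so any byte the callback does not set must not
     * contain leftover stack data. */
    memset(perm_addr, 0, sizeof(perm_addr));
    get_hw_addr(perm_addr);

    for (size_t i = 0; i < sizeof(perm_addr); i++)
        printf("%02x", perm_addr[i]);
    putchar('\n');
}

int main(void)
{
    reply_with_addr();
    return 0;
}
```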
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int DecodeGifImg(struct ngiflib_img * i) { struct ngiflib_decode_context context; long npix; u8 * stackp; u8 * stack_top; u16 clr; u16 eof; u16 free; u16 act_code = 0; u16 old_code = 0; u16 read_byt; u16 ab_prfx[4096]; u8 ab_suffx[4096]; u8 ab_stack[4096]; u8 flags; u8 casspecial = 0; if(!i) return -1; i->posX = GetWord(i->parent); /* offsetX */ i->posY = GetWord(i->parent); /* offsetY */ i->width = GetWord(i->parent); /* SizeX */ i->height = GetWord(i->parent); /* SizeY */ if((i->width > i->parent->width) || (i->height > i->parent->height)) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** ERROR *** Image bigger than global GIF canvas !\n"); #endif return -1; } if((i->posX + i->width) > i->parent->width) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** WARNING *** Adjusting X position\n"); #endif i->posX = i->parent->width - i->width; } if((i->posY + i->height) > i->parent->height) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** WARNING *** Adjusting Y position\n"); #endif i->posY = i->parent->height - i->height; } context.Xtogo = i->width; context.curY = i->posY; #ifdef NGIFLIB_INDEXED_ONLY #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #else if(i->parent->mode & NGIFLIB_MODE_INDEXED) { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width; context.frbuff_p.p32 = context.line_p.p32 + i->posX; #else context.frbuff_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ npix = (long)i->width * i->height; flags = GetByte(i->parent); i->interlaced = (flags & 64) >> 6; context.pass = i->interlaced ? 1 : 0; i->sort_flag = (flags & 32) >> 5; /* is local palette sorted by color frequency ? 
*/ i->localpalbits = (flags & 7) + 1; if(flags&128) { /* palette locale */ int k; int localpalsize = 1 << i->localpalbits; #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Local palette\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ i->palette = (struct ngiflib_rgb *)ngiflib_malloc(sizeof(struct ngiflib_rgb)*localpalsize); for(k=0; k<localpalsize; k++) { i->palette[k].r = GetByte(i->parent); i->palette[k].g = GetByte(i->parent); i->palette[k].b = GetByte(i->parent); } #ifdef NGIFLIB_ENABLE_CALLBACKS if(i->parent->palette_cb) i->parent->palette_cb(i->parent, i->palette, localpalsize); #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { i->palette = i->parent->palette; i->localpalbits = i->parent->imgbits; } i->ncolors = 1 << i->localpalbits; i->imgbits = GetByte(i->parent); /* LZW Minimum Code Size */ #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) { if(i->interlaced) fprintf(i->parent->log, "interlaced "); fprintf(i->parent->log, "img pos(%hu,%hu) size %hux%hu palbits=%hhu imgbits=%hhu ncolors=%hu\n", i->posX, i->posY, i->width, i->height, i->localpalbits, i->imgbits, i->ncolors); } #endif /* !defined(NGIFLIB_NO_FILE) */ if(i->imgbits==1) { /* fix for 1bit images ? */ i->imgbits = 2; } clr = 1 << i->imgbits; eof = clr + 1; free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ stackp = stack_top = ab_stack + 4096; context.restbits = 0; /* initialise le "buffer" de lecture */ context.restbyte = 0; /* des codes LZW */ context.lbyte = 0; for(;;) { act_code = GetGifWord(i, &context); if(act_code==eof) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "End of image code\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 0; } if(npix==0) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "assez de pixels, On se casse !\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 1; } if(act_code==clr) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Code clear (free=%hu) npix=%ld\n", free, npix); #endif /* !defined(NGIFLIB_NO_FILE) */ /* clear */ free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ act_code = GetGifWord(i, &context); casspecial = (u8)act_code; old_code = act_code; WritePixel(i, &context, casspecial); npix--; } else { read_byt = act_code; if(act_code >= free) { /* code pas encore dans alphabet */ /* printf("Code pas dans alphabet : %d>=%d push %d\n", act_code, free, casspecial); */ *(--stackp) = casspecial; /* dernier debut de chaine ! */ act_code = old_code; } /* printf("actcode=%d\n", act_code); */ while(act_code > clr) { /* code non concret */ /* fillstackloop empile les suffixes ! */ *(--stackp) = ab_suffx[act_code]; act_code = ab_prfx[act_code]; /* prefixe */ } /* act_code est concret */ casspecial = (u8)act_code; /* dernier debut de chaine ! */ *(--stackp) = casspecial; /* push on stack */ WritePixels(i, &context, stackp, stack_top - stackp); /* unstack all pixels at once */ npix -= (stack_top - stackp); stackp = stack_top; /* putchar('\n'); */ if(free < 4096) { /* la taille du dico est 4096 max ! */ ab_prfx[free] = old_code; ab_suffx[free] = (u8)act_code; free++; if((free > context.max) && (context.nbbit < 12)) { context.nbbit++; /* 1 bit de plus pour les codes LZW */ context.max += context.max + 1; } } old_code = read_byt; } } return 0; } Commit Message: fix "pixel overrun" fixes #3 CWE ID: CWE-119
static int DecodeGifImg(struct ngiflib_img * i) { struct ngiflib_decode_context context; long npix; u8 * stackp; u8 * stack_top; u16 clr; u16 eof; u16 free; u16 act_code = 0; u16 old_code = 0; u16 read_byt; u16 ab_prfx[4096]; u8 ab_suffx[4096]; u8 ab_stack[4096]; u8 flags; u8 casspecial = 0; if(!i) return -1; i->posX = GetWord(i->parent); /* offsetX */ i->posY = GetWord(i->parent); /* offsetY */ i->width = GetWord(i->parent); /* SizeX */ i->height = GetWord(i->parent); /* SizeY */ if((i->width > i->parent->width) || (i->height > i->parent->height)) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** ERROR *** Image bigger than global GIF canvas !\n"); #endif return -1; } if((i->posX + i->width) > i->parent->width) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** WARNING *** Adjusting X position\n"); #endif i->posX = i->parent->width - i->width; } if((i->posY + i->height) > i->parent->height) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** WARNING *** Adjusting Y position\n"); #endif i->posY = i->parent->height - i->height; } context.Xtogo = i->width; context.curY = i->posY; #ifdef NGIFLIB_INDEXED_ONLY #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #else if(i->parent->mode & NGIFLIB_MODE_INDEXED) { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width; context.frbuff_p.p32 = context.line_p.p32 + i->posX; #else context.frbuff_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ npix = (long)i->width * i->height; flags = GetByte(i->parent); i->interlaced = (flags & 64) >> 6; context.pass = i->interlaced ? 1 : 0; i->sort_flag = (flags & 32) >> 5; /* is local palette sorted by color frequency ? 
*/ i->localpalbits = (flags & 7) + 1; if(flags&128) { /* palette locale */ int k; int localpalsize = 1 << i->localpalbits; #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Local palette\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ i->palette = (struct ngiflib_rgb *)ngiflib_malloc(sizeof(struct ngiflib_rgb)*localpalsize); for(k=0; k<localpalsize; k++) { i->palette[k].r = GetByte(i->parent); i->palette[k].g = GetByte(i->parent); i->palette[k].b = GetByte(i->parent); } #ifdef NGIFLIB_ENABLE_CALLBACKS if(i->parent->palette_cb) i->parent->palette_cb(i->parent, i->palette, localpalsize); #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { i->palette = i->parent->palette; i->localpalbits = i->parent->imgbits; } i->ncolors = 1 << i->localpalbits; i->imgbits = GetByte(i->parent); /* LZW Minimum Code Size */ #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) { if(i->interlaced) fprintf(i->parent->log, "interlaced "); fprintf(i->parent->log, "img pos(%hu,%hu) size %hux%hu palbits=%hhu imgbits=%hhu ncolors=%hu\n", i->posX, i->posY, i->width, i->height, i->localpalbits, i->imgbits, i->ncolors); } #endif /* !defined(NGIFLIB_NO_FILE) */ if(i->imgbits==1) { /* fix for 1bit images ? */ i->imgbits = 2; } clr = 1 << i->imgbits; eof = clr + 1; free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ stackp = stack_top = ab_stack + 4096; context.restbits = 0; /* initialise le "buffer" de lecture */ context.restbyte = 0; /* des codes LZW */ context.lbyte = 0; for(;;) { act_code = GetGifWord(i, &context); if(act_code==eof) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "End of image code\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 0; } if(npix==0) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "assez de pixels, On se casse !\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 1; } if(act_code==clr) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Code clear (free=%hu) npix=%ld\n", free, npix); #endif /* !defined(NGIFLIB_NO_FILE) */ /* clear */ free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ act_code = GetGifWord(i, &context); casspecial = (u8)act_code; old_code = act_code; if(npix > 0) WritePixel(i, &context, casspecial); npix--; } else { read_byt = act_code; if(act_code >= free) { /* code pas encore dans alphabet */ /* printf("Code pas dans alphabet : %d>=%d push %d\n", act_code, free, casspecial); */ *(--stackp) = casspecial; /* dernier debut de chaine ! */ act_code = old_code; } /* printf("actcode=%d\n", act_code); */ while(act_code > clr) { /* code non concret */ /* fillstackloop empile les suffixes ! */ *(--stackp) = ab_suffx[act_code]; act_code = ab_prfx[act_code]; /* prefixe */ } /* act_code est concret */ casspecial = (u8)act_code; /* dernier debut de chaine ! */ *(--stackp) = casspecial; /* push on stack */ if(npix >= (stack_top - stackp)) { WritePixels(i, &context, stackp, stack_top - stackp); /* unstack all pixels at once */ } else if(npix > 0) { /* "pixel overflow" */ WritePixels(i, &context, stackp, npix); } npix -= (stack_top - stackp); stackp = stack_top; /* putchar('\n'); */ if(free < 4096) { /* la taille du dico est 4096 max ! 
*/ ab_prfx[free] = old_code; ab_suffx[free] = (u8)act_code; free++; if((free > context.max) && (context.nbbit < 12)) { context.nbbit++; /* 1 bit de plus pour les codes LZW */ context.max += context.max + 1; } } old_code = read_byt; } } return 0; }
169,246
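The ngiflib fix above clamps the LZW stack flush to the number of pixels still left in the frame. Below is a minimal sketch of clamping a bulk write to the remaining output space; the decoder details are invented.

```c
#include <stdio.h>
#include <string.h>

/* Copy up to 'count' decoded bytes, but never more than the space left in
 * the output frame. Returns how many bytes were actually written. */
static size_t write_pixels(unsigned char *frame, size_t frame_len,
                           size_t *pos, const unsigned char *src, size_t count)
{
    size_t space = frame_len - *pos;
    size_t n = count < space ? count : space;   /* clamp: the overrun fix */
    memcpy(frame + *pos, src, n);
    *pos += n;
    return n;
}

int main(void)
{
    unsigned char frame[8] = {0};
    unsigned char burst[16];
    memset(burst, 0xAA, sizeof(burst));
    size_t pos = 5;
    /* A malformed stream may hand us more pixels than the frame can hold. */
    size_t written = write_pixels(frame, sizeof(frame), &pos, burst, sizeof(burst));
    printf("wrote %zu of %zu bytes, pos=%zu\n", written, sizeof(burst), pos);
    return 0;
}
```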
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void PlatformSensorFusion::Create( mojo::ScopedSharedBufferMapping mapping, PlatformSensorProvider* provider, std::unique_ptr<PlatformSensorFusionAlgorithm> fusion_algorithm, const PlatformSensorProviderBase::CreateSensorCallback& callback) { Factory::CreateSensorFusion(std::move(mapping), std::move(fusion_algorithm), callback, provider); } Commit Message: android: Fix sensors in device service. This patch fixes a bug that prevented more than one sensor data to be available at once when using the device motion/orientation API. The issue was introduced by this other patch [1] which fixed some security-related issues in the way shared memory region handles are managed throughout Chromium (more details at https://crbug.com/789959). The device service´s sensor implementation doesn´t work correctly because it assumes it is possible to create a writable mapping of a given shared memory region at any time. This assumption is not correct on Android, once an Ashmem region has been turned read-only, such mappings are no longer possible. To fix the implementation, this CL changes the following: - PlatformSensor used to require moving a mojo::ScopedSharedBufferMapping into the newly-created instance. Said mapping being owned by and destroyed with the PlatformSensor instance. With this patch, the constructor instead takes a single pointer to the corresponding SensorReadingSharedBuffer, i.e. the area in memory where the sensor-specific reading data is located, and can be either updated or read-from. Note that the PlatformSensor does not own the mapping anymore. - PlatformSensorProviderBase holds the *single* writable mapping that is used to store all SensorReadingSharedBuffer buffers. It is created just after the region itself, and thus can be used even after the region's access mode has been changed to read-only. Addresses within the mapping will be passed to PlatformSensor constructors, computed from the mapping's base address plus a sensor-specific offset. The mapping is now owned by the PlatformSensorProviderBase instance. Note that, security-wise, nothing changes, because all mojo::ScopedSharedBufferMapping before the patch actually pointed to the same writable-page in memory anyway. Since unit or integration tests didn't catch the regression when [1] was submitted, this patch was tested manually by running a newly-built Chrome apk in the Android emulator and on a real device running Android O. [1] https://chromium-review.googlesource.com/c/chromium/src/+/805238 BUG=805146 [email protected],[email protected],[email protected],[email protected] Change-Id: I7d60a1cad278f48c361d2ece5a90de10eb082b44 Reviewed-on: https://chromium-review.googlesource.com/891180 Commit-Queue: David Turner <[email protected]> Reviewed-by: Reilly Grant <[email protected]> Reviewed-by: Matthew Cary <[email protected]> Reviewed-by: Alexandr Ilin <[email protected]> Cr-Commit-Position: refs/heads/master@{#532607} CWE ID: CWE-732
void PlatformSensorFusion::Create( SensorReadingSharedBuffer* reading_buffer, PlatformSensorProvider* provider, std::unique_ptr<PlatformSensorFusionAlgorithm> fusion_algorithm, const PlatformSensorProviderBase::CreateSensorCallback& callback) { Factory::CreateSensorFusion(reading_buffer, std::move(fusion_algorithm), callback, provider); }
172,827
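The commit message above describes the shape of the fix rather than showing it: one writable mapping stays with the provider, and each sensor only receives a pointer computed from the mapping's base address plus a sensor-specific offset. A rough C sketch of that layout, with hypothetical names (the real code is Chromium C++):

#include <stdint.h>
#include <stddef.h>

struct sensor_reading_slot {
    uint64_t seqcount;    /* guards a consistent read of the values below */
    double   values[4];   /* sensor-specific reading */
};

/* The provider owns the single writable mapping; sensors are handed only
 * a pointer derived from it, never a mapping of their own. */
static struct sensor_reading_slot *
slot_for_sensor(void *mapping_base, size_t sensor_index)
{
    return (struct sensor_reading_slot *)((uint8_t *)mapping_base
            + sensor_index * sizeof(struct sensor_reading_slot));
}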
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation.
Code: void user_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); if (key_is_instantiated(key)) seq_printf(m, ": %u", key->datalen); } Commit Message: KEYS: Fix race between updating and finding a negative key Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection error into one field such that: (1) The instantiation state can be modified/read atomically. (2) The error can be accessed atomically with the state. (3) The error isn't stored unioned with the payload pointers. This deals with the problem that the state is spread over three different objects (two bits and a separate variable) and reading or updating them atomically isn't practical, given that not only can uninstantiated keys change into instantiated or rejected keys, but rejected keys can also turn into instantiated keys - and someone accessing the key might not be using any locking. The main side effect of this problem is that what was held in the payload may change, depending on the state. For instance, you might observe the key to be in the rejected state. You then read the cached error, but if the key semaphore wasn't locked, the key might've become instantiated between the two reads - and you might now have something in hand that isn't actually an error code. The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error code if the key is negatively instantiated. The key_is_instantiated() function is replaced with key_is_positive() to avoid confusion as negative keys are also 'instantiated'. Additionally, barriering is included: (1) Order payload-set before state-set during instantiation. (2) Order state-read before payload-read when using the key. Further separate barriering is necessary if RCU is being used to access the payload content after reading the payload pointers. Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data") Cc: [email protected] # v4.4+ Reported-by: Eric Biggers <[email protected]> Signed-off-by: David Howells <[email protected]> Reviewed-by: Eric Biggers <[email protected]> CWE ID: CWE-20
void user_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); if (key_is_positive(key)) seq_printf(m, ": %u", key->datalen); }
167,709
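The ordering rules spelled out in the commit message (payload set before state set, state read before payload read) are a textbook release/acquire pairing. A standalone C11 illustration of that pairing follows; it is not the kernel's key code, and every name in it is made up:

#include <stdatomic.h>
#include <stddef.h>

struct cached_entry {
    void       *payload;   /* plain field, published by `state` below */
    _Atomic int state;     /* 0 = uninstantiated, 1 = positive, <0 = -errno */
};

static void publish(struct cached_entry *e, void *data)
{
    e->payload = data;                                           /* (1) payload first */
    atomic_store_explicit(&e->state, 1, memory_order_release);   /* (2) then state */
}

static void *lookup(struct cached_entry *e, int *err)
{
    int s = atomic_load_explicit(&e->state, memory_order_acquire); /* state first */
    if (s == 1)
        return e->payload;   /* safe: acquire pairs with the release in publish() */
    *err = s;                /* uninstantiated or a negative error, never a stale pointer */
    return NULL;
}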
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation.
Code: void bpf_jit_compile(struct sk_filter *fp) { u8 temp[64]; u8 *prog; unsigned int proglen, oldproglen = 0; int ilen, i; int t_offset, f_offset; u8 t_op, f_op, seen = 0, pass; u8 *image = NULL; u8 *func; int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */ unsigned int cleanup_addr; /* epilogue code offset */ unsigned int *addrs; const struct sock_filter *filter = fp->insns; int flen = fp->len; if (!bpf_jit_enable) return; addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL); if (addrs == NULL) return; /* Before first pass, make a rough estimation of addrs[] * each bpf instruction is translated to less than 64 bytes */ for (proglen = 0, i = 0; i < flen; i++) { proglen += 64; addrs[i] = proglen; } cleanup_addr = proglen; /* epilogue address */ for (pass = 0; pass < 10; pass++) { /* no prologue/epilogue for trivial filters (RET something) */ proglen = 0; prog = temp; if (seen) { EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */ EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */ /* note : must save %rbx in case bpf_error is hit */ if (seen & (SEEN_XREG | SEEN_DATAREF)) EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */ if (seen & SEEN_XREG) CLEAR_X(); /* make sure we dont leek kernel memory */ /* * If this filter needs to access skb data, * loads r9 and r8 with : * r9 = skb->len - skb->data_len * r8 = skb->data */ if (seen & SEEN_DATAREF) { if (offsetof(struct sk_buff, len) <= 127) /* mov off8(%rdi),%r9d */ EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len)); else { /* mov off32(%rdi),%r9d */ EMIT3(0x44, 0x8b, 0x8f); EMIT(offsetof(struct sk_buff, len), 4); } if (is_imm8(offsetof(struct sk_buff, data_len))) /* sub off8(%rdi),%r9d */ EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len)); else { EMIT3(0x44, 0x2b, 0x8f); EMIT(offsetof(struct sk_buff, data_len), 4); } if (is_imm8(offsetof(struct sk_buff, data))) /* mov off8(%rdi),%r8 */ EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data)); else { /* mov off32(%rdi),%r8 */ EMIT3(0x4c, 0x8b, 0x87); EMIT(offsetof(struct sk_buff, data), 4); } } } switch (filter[0].code) { case BPF_S_RET_K: case BPF_S_LD_W_LEN: case BPF_S_ANC_PROTOCOL: case BPF_S_ANC_IFINDEX: case BPF_S_ANC_MARK: case BPF_S_ANC_RXHASH: case BPF_S_ANC_CPU: case BPF_S_ANC_QUEUE: case BPF_S_LD_W_ABS: case BPF_S_LD_H_ABS: case BPF_S_LD_B_ABS: /* first instruction sets A register (or is RET 'constant') */ break; default: /* make sure we dont leak kernel information to user */ CLEAR_A(); /* A = 0 */ } for (i = 0; i < flen; i++) { unsigned int K = filter[i].k; switch (filter[i].code) { case BPF_S_ALU_ADD_X: /* A += X; */ seen |= SEEN_XREG; EMIT2(0x01, 0xd8); /* add %ebx,%eax */ break; case BPF_S_ALU_ADD_K: /* A += K; */ if (!K) break; if (is_imm8(K)) EMIT3(0x83, 0xc0, K); /* add imm8,%eax */ else EMIT1_off32(0x05, K); /* add imm32,%eax */ break; case BPF_S_ALU_SUB_X: /* A -= X; */ seen |= SEEN_XREG; EMIT2(0x29, 0xd8); /* sub %ebx,%eax */ break; case BPF_S_ALU_SUB_K: /* A -= K */ if (!K) break; if (is_imm8(K)) EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */ else EMIT1_off32(0x2d, K); /* sub imm32,%eax */ break; case BPF_S_ALU_MUL_X: /* A *= X; */ seen |= SEEN_XREG; EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */ break; case BPF_S_ALU_MUL_K: /* A *= K */ if (is_imm8(K)) EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */ else { EMIT2(0x69, 0xc0); /* imul imm32,%eax */ EMIT(K, 4); } break; case BPF_S_ALU_DIV_X: /* A /= X; */ seen |= SEEN_XREG; EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ if (pc_ret0 != -1) EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4)); else 
{ EMIT_COND_JMP(X86_JNE, 2 + 5); CLEAR_A(); EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */ } EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */ break; case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */ EMIT(K, 4); EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */ break; case BPF_S_ALU_AND_X: seen |= SEEN_XREG; EMIT2(0x21, 0xd8); /* and %ebx,%eax */ break; case BPF_S_ALU_AND_K: if (K >= 0xFFFFFF00) { EMIT2(0x24, K & 0xFF); /* and imm8,%al */ } else if (K >= 0xFFFF0000) { EMIT2(0x66, 0x25); /* and imm16,%ax */ EMIT2(K, 2); } else { EMIT1_off32(0x25, K); /* and imm32,%eax */ } break; case BPF_S_ALU_OR_X: seen |= SEEN_XREG; EMIT2(0x09, 0xd8); /* or %ebx,%eax */ break; case BPF_S_ALU_OR_K: if (is_imm8(K)) EMIT3(0x83, 0xc8, K); /* or imm8,%eax */ else EMIT1_off32(0x0d, K); /* or imm32,%eax */ break; case BPF_S_ALU_LSH_X: /* A <<= X; */ seen |= SEEN_XREG; EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */ break; case BPF_S_ALU_LSH_K: if (K == 0) break; else if (K == 1) EMIT2(0xd1, 0xe0); /* shl %eax */ else EMIT3(0xc1, 0xe0, K); break; case BPF_S_ALU_RSH_X: /* A >>= X; */ seen |= SEEN_XREG; EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */ break; case BPF_S_ALU_RSH_K: /* A >>= K; */ if (K == 0) break; else if (K == 1) EMIT2(0xd1, 0xe8); /* shr %eax */ else EMIT3(0xc1, 0xe8, K); break; case BPF_S_ALU_NEG: EMIT2(0xf7, 0xd8); /* neg %eax */ break; case BPF_S_RET_K: if (!K) { if (pc_ret0 == -1) pc_ret0 = i; CLEAR_A(); } else { EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ } /* fallinto */ case BPF_S_RET_A: if (seen) { if (i != flen - 1) { EMIT_JMP(cleanup_addr - addrs[i]); break; } if (seen & SEEN_XREG) EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */ EMIT1(0xc9); /* leaveq */ } EMIT1(0xc3); /* ret */ break; case BPF_S_MISC_TAX: /* X = A */ seen |= SEEN_XREG; EMIT2(0x89, 0xc3); /* mov %eax,%ebx */ break; case BPF_S_MISC_TXA: /* A = X */ seen |= SEEN_XREG; EMIT2(0x89, 0xd8); /* mov %ebx,%eax */ break; case BPF_S_LD_IMM: /* A = K */ if (!K) CLEAR_A(); else EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ break; case BPF_S_LDX_IMM: /* X = K */ seen |= SEEN_XREG; if (!K) CLEAR_X(); else EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */ break; case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */ seen |= SEEN_MEM; EMIT3(0x8b, 0x45, 0xf0 - K*4); break; case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */ seen |= SEEN_XREG | SEEN_MEM; EMIT3(0x8b, 0x5d, 0xf0 - K*4); break; case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */ seen |= SEEN_MEM; EMIT3(0x89, 0x45, 0xf0 - K*4); break; case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ seen |= SEEN_XREG | SEEN_MEM; EMIT3(0x89, 0x5d, 0xf0 - K*4); break; case BPF_S_LD_W_LEN: /* A = skb->len; */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); if (is_imm8(offsetof(struct sk_buff, len))) /* mov off8(%rdi),%eax */ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len)); else { EMIT2(0x8b, 0x87); EMIT(offsetof(struct sk_buff, len), 4); } break; case BPF_S_LDX_W_LEN: /* X = skb->len; */ seen |= SEEN_XREG; if (is_imm8(offsetof(struct sk_buff, len))) /* mov off8(%rdi),%ebx */ EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len)); else { EMIT2(0x8b, 0x9f); EMIT(offsetof(struct sk_buff, len), 4); } break; case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); if (is_imm8(offsetof(struct sk_buff, protocol))) { /* movzwl off8(%rdi),%eax */ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, 
protocol)); } else { EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ EMIT(offsetof(struct sk_buff, protocol), 4); } EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */ break; case BPF_S_ANC_IFINDEX: if (is_imm8(offsetof(struct sk_buff, dev))) { /* movq off8(%rdi),%rax */ EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev)); } else { EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */ EMIT(offsetof(struct sk_buff, dev), 4); } EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */ EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6)); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */ EMIT(offsetof(struct net_device, ifindex), 4); break; case BPF_S_ANC_MARK: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); if (is_imm8(offsetof(struct sk_buff, mark))) { /* mov off8(%rdi),%eax */ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark)); } else { EMIT2(0x8b, 0x87); EMIT(offsetof(struct sk_buff, mark), 4); } break; case BPF_S_ANC_RXHASH: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4); if (is_imm8(offsetof(struct sk_buff, rxhash))) { /* mov off8(%rdi),%eax */ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash)); } else { EMIT2(0x8b, 0x87); EMIT(offsetof(struct sk_buff, rxhash), 4); } break; case BPF_S_ANC_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); if (is_imm8(offsetof(struct sk_buff, queue_mapping))) { /* movzwl off8(%rdi),%eax */ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping)); } else { EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ EMIT(offsetof(struct sk_buff, queue_mapping), 4); } break; case BPF_S_ANC_CPU: #ifdef CONFIG_SMP EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */ EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */ #else CLEAR_A(); #endif break; case BPF_S_LD_W_ABS: func = sk_load_word; common_load: seen |= SEEN_DATAREF; if ((int)K < 0) goto out; t_offset = func - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call */ break; case BPF_S_LD_H_ABS: func = sk_load_half; goto common_load; case BPF_S_LD_B_ABS: func = sk_load_byte; goto common_load; case BPF_S_LDX_B_MSH: if ((int)K < 0) { if (pc_ret0 != -1) { EMIT_JMP(addrs[pc_ret0] - addrs[i]); break; } CLEAR_A(); EMIT_JMP(cleanup_addr - addrs[i]); break; } seen |= SEEN_DATAREF | SEEN_XREG; t_offset = sk_load_byte_msh - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */ break; case BPF_S_LD_W_IND: func = sk_load_word_ind; common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; t_offset = func - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */ break; case BPF_S_LD_H_IND: func = sk_load_half_ind; goto common_load_ind; case BPF_S_LD_B_IND: func = sk_load_byte_ind; goto common_load_ind; case BPF_S_JMP_JA: t_offset = addrs[i + K] - addrs[i]; EMIT_JMP(t_offset); break; COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE); COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB); COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE); COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE); COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE); COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB); COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE); COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE); cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; t_offset = addrs[i + filter[i].jt] - addrs[i]; /* same targets, can avoid doing the test :) */ if (filter[i].jt == filter[i].jf) { EMIT_JMP(t_offset); break; } 
switch (filter[i].code) { case BPF_S_JMP_JGT_X: case BPF_S_JMP_JGE_X: case BPF_S_JMP_JEQ_X: seen |= SEEN_XREG; EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */ break; case BPF_S_JMP_JSET_X: seen |= SEEN_XREG; EMIT2(0x85, 0xd8); /* test %ebx,%eax */ break; case BPF_S_JMP_JEQ_K: if (K == 0) { EMIT2(0x85, 0xc0); /* test %eax,%eax */ break; } case BPF_S_JMP_JGT_K: case BPF_S_JMP_JGE_K: if (K <= 127) EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */ else EMIT1_off32(0x3d, K); /* cmp imm32,%eax */ break; case BPF_S_JMP_JSET_K: if (K <= 0xFF) EMIT2(0xa8, K); /* test imm8,%al */ else if (!(K & 0xFFFF00FF)) EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */ else if (K <= 0xFFFF) { EMIT2(0x66, 0xa9); /* test imm16,%ax */ EMIT(K, 2); } else { EMIT1_off32(0xa9, K); /* test imm32,%eax */ } break; } if (filter[i].jt != 0) { if (filter[i].jf) t_offset += is_near(f_offset) ? 2 : 6; EMIT_COND_JMP(t_op, t_offset); if (filter[i].jf) EMIT_JMP(f_offset); break; } EMIT_COND_JMP(f_op, f_offset); break; default: /* hmm, too complex filter, give up with jit compiler */ goto out; } ilen = prog - temp; if (image) { if (unlikely(proglen + ilen > oldproglen)) { pr_err("bpb_jit_compile fatal error\n"); kfree(addrs); module_free(NULL, image); return; } memcpy(image + proglen, temp, ilen); } proglen += ilen; addrs[i] = proglen; prog = temp; } /* last bpf instruction is always a RET : * use it to give the cleanup instruction(s) addr */ cleanup_addr = proglen - 1; /* ret */ if (seen) cleanup_addr -= 1; /* leaveq */ if (seen & SEEN_XREG) cleanup_addr -= 4; /* mov -8(%rbp),%rbx */ if (image) { WARN_ON(proglen != oldproglen); break; } if (proglen == oldproglen) { image = module_alloc(max_t(unsigned int, proglen, sizeof(struct work_struct))); if (!image) goto out; } oldproglen = proglen; } if (bpf_jit_enable > 1) pr_err("flen=%d proglen=%u pass=%d image=%p\n", flen, proglen, pass, image); if (image) { if (bpf_jit_enable > 1) print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, 16, 1, image, proglen, false); bpf_flush_icache(image, image + proglen); fp->bpf_func = (void *)image; } out: kfree(addrs); return; } Commit Message: net: bpf_jit: fix an off-one bug in x86_64 cond jump target x86 jump instruction size is 2 or 5 bytes (near/long jump), not 2 or 6 bytes. In case a conditional jump is followed by a long jump, conditional jump target is one byte past the start of target instruction. Signed-off-by: Markus Kötter <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-189
void bpf_jit_compile(struct sk_filter *fp) { u8 temp[64]; u8 *prog; unsigned int proglen, oldproglen = 0; int ilen, i; int t_offset, f_offset; u8 t_op, f_op, seen = 0, pass; u8 *image = NULL; u8 *func; int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */ unsigned int cleanup_addr; /* epilogue code offset */ unsigned int *addrs; const struct sock_filter *filter = fp->insns; int flen = fp->len; if (!bpf_jit_enable) return; addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL); if (addrs == NULL) return; /* Before first pass, make a rough estimation of addrs[] * each bpf instruction is translated to less than 64 bytes */ for (proglen = 0, i = 0; i < flen; i++) { proglen += 64; addrs[i] = proglen; } cleanup_addr = proglen; /* epilogue address */ for (pass = 0; pass < 10; pass++) { /* no prologue/epilogue for trivial filters (RET something) */ proglen = 0; prog = temp; if (seen) { EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */ EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */ /* note : must save %rbx in case bpf_error is hit */ if (seen & (SEEN_XREG | SEEN_DATAREF)) EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */ if (seen & SEEN_XREG) CLEAR_X(); /* make sure we dont leek kernel memory */ /* * If this filter needs to access skb data, * loads r9 and r8 with : * r9 = skb->len - skb->data_len * r8 = skb->data */ if (seen & SEEN_DATAREF) { if (offsetof(struct sk_buff, len) <= 127) /* mov off8(%rdi),%r9d */ EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len)); else { /* mov off32(%rdi),%r9d */ EMIT3(0x44, 0x8b, 0x8f); EMIT(offsetof(struct sk_buff, len), 4); } if (is_imm8(offsetof(struct sk_buff, data_len))) /* sub off8(%rdi),%r9d */ EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len)); else { EMIT3(0x44, 0x2b, 0x8f); EMIT(offsetof(struct sk_buff, data_len), 4); } if (is_imm8(offsetof(struct sk_buff, data))) /* mov off8(%rdi),%r8 */ EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data)); else { /* mov off32(%rdi),%r8 */ EMIT3(0x4c, 0x8b, 0x87); EMIT(offsetof(struct sk_buff, data), 4); } } } switch (filter[0].code) { case BPF_S_RET_K: case BPF_S_LD_W_LEN: case BPF_S_ANC_PROTOCOL: case BPF_S_ANC_IFINDEX: case BPF_S_ANC_MARK: case BPF_S_ANC_RXHASH: case BPF_S_ANC_CPU: case BPF_S_ANC_QUEUE: case BPF_S_LD_W_ABS: case BPF_S_LD_H_ABS: case BPF_S_LD_B_ABS: /* first instruction sets A register (or is RET 'constant') */ break; default: /* make sure we dont leak kernel information to user */ CLEAR_A(); /* A = 0 */ } for (i = 0; i < flen; i++) { unsigned int K = filter[i].k; switch (filter[i].code) { case BPF_S_ALU_ADD_X: /* A += X; */ seen |= SEEN_XREG; EMIT2(0x01, 0xd8); /* add %ebx,%eax */ break; case BPF_S_ALU_ADD_K: /* A += K; */ if (!K) break; if (is_imm8(K)) EMIT3(0x83, 0xc0, K); /* add imm8,%eax */ else EMIT1_off32(0x05, K); /* add imm32,%eax */ break; case BPF_S_ALU_SUB_X: /* A -= X; */ seen |= SEEN_XREG; EMIT2(0x29, 0xd8); /* sub %ebx,%eax */ break; case BPF_S_ALU_SUB_K: /* A -= K */ if (!K) break; if (is_imm8(K)) EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */ else EMIT1_off32(0x2d, K); /* sub imm32,%eax */ break; case BPF_S_ALU_MUL_X: /* A *= X; */ seen |= SEEN_XREG; EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */ break; case BPF_S_ALU_MUL_K: /* A *= K */ if (is_imm8(K)) EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */ else { EMIT2(0x69, 0xc0); /* imul imm32,%eax */ EMIT(K, 4); } break; case BPF_S_ALU_DIV_X: /* A /= X; */ seen |= SEEN_XREG; EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ if (pc_ret0 != -1) EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4)); else { 
EMIT_COND_JMP(X86_JNE, 2 + 5); CLEAR_A(); EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */ } EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */ break; case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */ EMIT(K, 4); EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */ break; case BPF_S_ALU_AND_X: seen |= SEEN_XREG; EMIT2(0x21, 0xd8); /* and %ebx,%eax */ break; case BPF_S_ALU_AND_K: if (K >= 0xFFFFFF00) { EMIT2(0x24, K & 0xFF); /* and imm8,%al */ } else if (K >= 0xFFFF0000) { EMIT2(0x66, 0x25); /* and imm16,%ax */ EMIT2(K, 2); } else { EMIT1_off32(0x25, K); /* and imm32,%eax */ } break; case BPF_S_ALU_OR_X: seen |= SEEN_XREG; EMIT2(0x09, 0xd8); /* or %ebx,%eax */ break; case BPF_S_ALU_OR_K: if (is_imm8(K)) EMIT3(0x83, 0xc8, K); /* or imm8,%eax */ else EMIT1_off32(0x0d, K); /* or imm32,%eax */ break; case BPF_S_ALU_LSH_X: /* A <<= X; */ seen |= SEEN_XREG; EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */ break; case BPF_S_ALU_LSH_K: if (K == 0) break; else if (K == 1) EMIT2(0xd1, 0xe0); /* shl %eax */ else EMIT3(0xc1, 0xe0, K); break; case BPF_S_ALU_RSH_X: /* A >>= X; */ seen |= SEEN_XREG; EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */ break; case BPF_S_ALU_RSH_K: /* A >>= K; */ if (K == 0) break; else if (K == 1) EMIT2(0xd1, 0xe8); /* shr %eax */ else EMIT3(0xc1, 0xe8, K); break; case BPF_S_ALU_NEG: EMIT2(0xf7, 0xd8); /* neg %eax */ break; case BPF_S_RET_K: if (!K) { if (pc_ret0 == -1) pc_ret0 = i; CLEAR_A(); } else { EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ } /* fallinto */ case BPF_S_RET_A: if (seen) { if (i != flen - 1) { EMIT_JMP(cleanup_addr - addrs[i]); break; } if (seen & SEEN_XREG) EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */ EMIT1(0xc9); /* leaveq */ } EMIT1(0xc3); /* ret */ break; case BPF_S_MISC_TAX: /* X = A */ seen |= SEEN_XREG; EMIT2(0x89, 0xc3); /* mov %eax,%ebx */ break; case BPF_S_MISC_TXA: /* A = X */ seen |= SEEN_XREG; EMIT2(0x89, 0xd8); /* mov %ebx,%eax */ break; case BPF_S_LD_IMM: /* A = K */ if (!K) CLEAR_A(); else EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ break; case BPF_S_LDX_IMM: /* X = K */ seen |= SEEN_XREG; if (!K) CLEAR_X(); else EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */ break; case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */ seen |= SEEN_MEM; EMIT3(0x8b, 0x45, 0xf0 - K*4); break; case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */ seen |= SEEN_XREG | SEEN_MEM; EMIT3(0x8b, 0x5d, 0xf0 - K*4); break; case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */ seen |= SEEN_MEM; EMIT3(0x89, 0x45, 0xf0 - K*4); break; case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ seen |= SEEN_XREG | SEEN_MEM; EMIT3(0x89, 0x5d, 0xf0 - K*4); break; case BPF_S_LD_W_LEN: /* A = skb->len; */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); if (is_imm8(offsetof(struct sk_buff, len))) /* mov off8(%rdi),%eax */ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len)); else { EMIT2(0x8b, 0x87); EMIT(offsetof(struct sk_buff, len), 4); } break; case BPF_S_LDX_W_LEN: /* X = skb->len; */ seen |= SEEN_XREG; if (is_imm8(offsetof(struct sk_buff, len))) /* mov off8(%rdi),%ebx */ EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len)); else { EMIT2(0x8b, 0x9f); EMIT(offsetof(struct sk_buff, len), 4); } break; case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); if (is_imm8(offsetof(struct sk_buff, protocol))) { /* movzwl off8(%rdi),%eax */ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol)); 
} else { EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ EMIT(offsetof(struct sk_buff, protocol), 4); } EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */ break; case BPF_S_ANC_IFINDEX: if (is_imm8(offsetof(struct sk_buff, dev))) { /* movq off8(%rdi),%rax */ EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev)); } else { EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */ EMIT(offsetof(struct sk_buff, dev), 4); } EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */ EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6)); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */ EMIT(offsetof(struct net_device, ifindex), 4); break; case BPF_S_ANC_MARK: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); if (is_imm8(offsetof(struct sk_buff, mark))) { /* mov off8(%rdi),%eax */ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark)); } else { EMIT2(0x8b, 0x87); EMIT(offsetof(struct sk_buff, mark), 4); } break; case BPF_S_ANC_RXHASH: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4); if (is_imm8(offsetof(struct sk_buff, rxhash))) { /* mov off8(%rdi),%eax */ EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash)); } else { EMIT2(0x8b, 0x87); EMIT(offsetof(struct sk_buff, rxhash), 4); } break; case BPF_S_ANC_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); if (is_imm8(offsetof(struct sk_buff, queue_mapping))) { /* movzwl off8(%rdi),%eax */ EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping)); } else { EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ EMIT(offsetof(struct sk_buff, queue_mapping), 4); } break; case BPF_S_ANC_CPU: #ifdef CONFIG_SMP EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */ EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */ #else CLEAR_A(); #endif break; case BPF_S_LD_W_ABS: func = sk_load_word; common_load: seen |= SEEN_DATAREF; if ((int)K < 0) goto out; t_offset = func - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call */ break; case BPF_S_LD_H_ABS: func = sk_load_half; goto common_load; case BPF_S_LD_B_ABS: func = sk_load_byte; goto common_load; case BPF_S_LDX_B_MSH: if ((int)K < 0) { if (pc_ret0 != -1) { EMIT_JMP(addrs[pc_ret0] - addrs[i]); break; } CLEAR_A(); EMIT_JMP(cleanup_addr - addrs[i]); break; } seen |= SEEN_DATAREF | SEEN_XREG; t_offset = sk_load_byte_msh - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */ break; case BPF_S_LD_W_IND: func = sk_load_word_ind; common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; t_offset = func - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */ break; case BPF_S_LD_H_IND: func = sk_load_half_ind; goto common_load_ind; case BPF_S_LD_B_IND: func = sk_load_byte_ind; goto common_load_ind; case BPF_S_JMP_JA: t_offset = addrs[i + K] - addrs[i]; EMIT_JMP(t_offset); break; COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE); COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB); COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE); COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE); COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE); COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB); COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE); COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE); cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; t_offset = addrs[i + filter[i].jt] - addrs[i]; /* same targets, can avoid doing the test :) */ if (filter[i].jt == filter[i].jf) { EMIT_JMP(t_offset); break; } switch 
(filter[i].code) { case BPF_S_JMP_JGT_X: case BPF_S_JMP_JGE_X: case BPF_S_JMP_JEQ_X: seen |= SEEN_XREG; EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */ break; case BPF_S_JMP_JSET_X: seen |= SEEN_XREG; EMIT2(0x85, 0xd8); /* test %ebx,%eax */ break; case BPF_S_JMP_JEQ_K: if (K == 0) { EMIT2(0x85, 0xc0); /* test %eax,%eax */ break; } case BPF_S_JMP_JGT_K: case BPF_S_JMP_JGE_K: if (K <= 127) EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */ else EMIT1_off32(0x3d, K); /* cmp imm32,%eax */ break; case BPF_S_JMP_JSET_K: if (K <= 0xFF) EMIT2(0xa8, K); /* test imm8,%al */ else if (!(K & 0xFFFF00FF)) EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */ else if (K <= 0xFFFF) { EMIT2(0x66, 0xa9); /* test imm16,%ax */ EMIT(K, 2); } else { EMIT1_off32(0xa9, K); /* test imm32,%eax */ } break; } if (filter[i].jt != 0) { if (filter[i].jf && f_offset) t_offset += is_near(f_offset) ? 2 : 5; EMIT_COND_JMP(t_op, t_offset); if (filter[i].jf) EMIT_JMP(f_offset); break; } EMIT_COND_JMP(f_op, f_offset); break; default: /* hmm, too complex filter, give up with jit compiler */ goto out; } ilen = prog - temp; if (image) { if (unlikely(proglen + ilen > oldproglen)) { pr_err("bpb_jit_compile fatal error\n"); kfree(addrs); module_free(NULL, image); return; } memcpy(image + proglen, temp, ilen); } proglen += ilen; addrs[i] = proglen; prog = temp; } /* last bpf instruction is always a RET : * use it to give the cleanup instruction(s) addr */ cleanup_addr = proglen - 1; /* ret */ if (seen) cleanup_addr -= 1; /* leaveq */ if (seen & SEEN_XREG) cleanup_addr -= 4; /* mov -8(%rbp),%rbx */ if (image) { WARN_ON(proglen != oldproglen); break; } if (proglen == oldproglen) { image = module_alloc(max_t(unsigned int, proglen, sizeof(struct work_struct))); if (!image) goto out; } oldproglen = proglen; } if (bpf_jit_enable > 1) pr_err("flen=%d proglen=%u pass=%d image=%p\n", flen, proglen, pass, image); if (image) { if (bpf_jit_enable > 1) print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, 16, 1, image, proglen, false); bpf_flush_icache(image, image + proglen); fp->bpf_func = (void *)image; } out: kfree(addrs); return; }
166,388
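The arithmetic behind this one-line JIT fix is easy to check by hand: a short jmp rel8 encodes in 2 bytes (opcode 0xEB) and a long jmp rel32 in 5 bytes (opcode 0xE9 plus a 4-byte displacement), so the conditional branch that hops over the fall-through jump must grow by 2 or 5, never 6. A tiny self-contained check with illustrative offsets:

#include <stdio.h>

static int fits_rel8(int off) { return off >= -128 && off <= 127; }

int main(void)
{
    int t_offset = 40;    /* displacement to the taken target (illustrative) */
    int f_offset = 300;   /* fall-through target too far for rel8: needs jmp rel32 */

    /* Size of the jump being skipped: 2 bytes (EB rel8) or 5 bytes (E9 rel32). */
    int skipped_jmp_len = fits_rel8(f_offset) ? 2 : 5;

    printf("adjusted conditional displacement: %d\n", t_offset + skipped_jmp_len);
    return 0;
}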
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation.
Code: static int sgi_clock_set(clockid_t clockid, struct timespec *tp) { u64 nsec; u64 rem; nsec = rtc_time() * sgi_clock_period; sgi_clock_offset.tv_sec = tp->tv_sec - div_long_long_rem(nsec, NSEC_PER_SEC, &rem); if (rem <= tp->tv_nsec) sgi_clock_offset.tv_nsec = tp->tv_sec - rem; else { sgi_clock_offset.tv_nsec = tp->tv_sec + NSEC_PER_SEC - rem; sgi_clock_offset.tv_sec--; } return 0; } Commit Message: remove div_long_long_rem x86 is the only arch right now, which provides an optimized for div_long_long_rem and it has the downside that one has to be very careful that the divide doesn't overflow. The API is a little akward, as the arguments for the unsigned divide are signed. The signed version also doesn't handle a negative divisor and produces worse code on 64bit archs. There is little incentive to keep this API alive, so this converts the few users to the new API. Signed-off-by: Roman Zippel <[email protected]> Cc: Ralf Baechle <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: john stultz <[email protected]> Cc: Christoph Lameter <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-189
static int sgi_clock_set(clockid_t clockid, struct timespec *tp) { u64 nsec; u32 rem; nsec = rtc_time() * sgi_clock_period; sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem); if (rem <= tp->tv_nsec) sgi_clock_offset.tv_nsec = tp->tv_sec - rem; else { sgi_clock_offset.tv_nsec = tp->tv_sec + NSEC_PER_SEC - rem; sgi_clock_offset.tv_sec--; } return 0; }
165,750
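The replacement helper in the fixed function, div_u64_rem(), returns the 64-bit quotient and stores a 32-bit remainder, which is exactly what the clock offset needs. A portable sketch of the same split; the helper below is made up, and in the kernel the dedicated helper is there so 32-bit architectures can avoid a full 64-by-64 divide:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Split a 64-bit nanosecond count into whole seconds and a remainder
 * that is guaranteed to fit in 32 bits. */
static uint64_t split_ns(uint64_t nsec, uint32_t *rem)
{
    *rem = (uint32_t)(nsec % NSEC_PER_SEC);
    return nsec / NSEC_PER_SEC;
}

int main(void)
{
    uint32_t rem;
    uint64_t sec = split_ns(3123456789ULL, &rem);
    printf("%llu s + %u ns\n", (unsigned long long)sec, rem);  /* 3 s + 123456789 ns */
    return 0;
}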
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation.
Code: int __usb_get_extra_descriptor(char *buffer, unsigned size, unsigned char type, void **ptr) { struct usb_descriptor_header *header; while (size >= sizeof(struct usb_descriptor_header)) { header = (struct usb_descriptor_header *)buffer; if (header->bLength < 2) { printk(KERN_ERR "%s: bogus descriptor, type %d length %d\n", usbcore_name, header->bDescriptorType, header->bLength); return -1; } if (header->bDescriptorType == type) { *ptr = header; return 0; } buffer += header->bLength; size -= header->bLength; } return -1; } Commit Message: USB: check usb_get_extra_descriptor for proper size When reading an extra descriptor, we need to properly check the minimum and maximum size allowed, to prevent from invalid data being sent by a device. Reported-by: Hui Peng <[email protected]> Reported-by: Mathias Payer <[email protected]> Co-developed-by: Linus Torvalds <[email protected]> Signed-off-by: Hui Peng <[email protected]> Signed-off-by: Mathias Payer <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]> CWE ID: CWE-400
int __usb_get_extra_descriptor(char *buffer, unsigned size, unsigned char type, void **ptr, size_t minsize) { struct usb_descriptor_header *header; while (size >= sizeof(struct usb_descriptor_header)) { header = (struct usb_descriptor_header *)buffer; if (header->bLength < 2 || header->bLength > size) { printk(KERN_ERR "%s: bogus descriptor, type %d length %d\n", usbcore_name, header->bDescriptorType, header->bLength); return -1; } if (header->bDescriptorType == type && header->bLength >= minsize) { *ptr = header; return 0; } buffer += header->bLength; size -= header->bLength; } return -1; }
168,960
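The fixed walker adds two bounds checks: a descriptor whose claimed bLength runs past the remaining buffer is rejected, and a type match only counts when the descriptor is at least minsize bytes, so the caller can safely read the struct it asked for. The same pattern for a generic length-prefixed buffer, with illustrative names:

#include <stddef.h>

struct desc_header { unsigned char bLength, bDescriptorType; };

static const void *find_desc(const unsigned char *buf, size_t size,
                             unsigned char type, size_t minsize)
{
    while (size >= sizeof(struct desc_header)) {
        const struct desc_header *h = (const struct desc_header *)buf;
        if (h->bLength < sizeof(*h) || h->bLength > size)
            return NULL;                      /* malformed length: stop walking */
        if (h->bDescriptorType == type && h->bLength >= minsize)
            return h;                         /* large enough to dereference */
        buf  += h->bLength;
        size -= h->bLength;
    }
    return NULL;
}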
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation.
Code: SSLErrorInfo SSLErrorInfo::CreateError(ErrorType error_type, net::X509Certificate* cert, const GURL& request_url) { string16 title, details, short_description; std::vector<string16> extra_info; switch (error_type) { case CERT_COMMON_NAME_INVALID: { title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_COMMON_NAME_INVALID_TITLE); std::vector<std::string> dns_names; cert->GetDNSNames(&dns_names); DCHECK(!dns_names.empty()); size_t i = 0; for (; i < dns_names.size(); ++i) { if (dns_names[i] == cert->subject().common_name) break; } if (i == dns_names.size()) i = 0; details = l10n_util::GetStringFUTF16(IDS_CERT_ERROR_COMMON_NAME_INVALID_DETAILS, UTF8ToUTF16(request_url.host()), UTF8ToUTF16(dns_names[i]), UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16( IDS_CERT_ERROR_COMMON_NAME_INVALID_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back( l10n_util::GetStringFUTF16( IDS_CERT_ERROR_COMMON_NAME_INVALID_EXTRA_INFO_2, UTF8ToUTF16(cert->subject().common_name), UTF8ToUTF16(request_url.host()))); break; } case CERT_DATE_INVALID: extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); if (cert->HasExpired()) { title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXPIRED_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_EXPIRED_DETAILS, UTF8ToUTF16(request_url.host()), UTF8ToUTF16(request_url.host()), base::TimeFormatFriendlyDateAndTime(base::Time::Now())); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXPIRED_DESCRIPTION); extra_info.push_back(l10n_util::GetStringUTF16( IDS_CERT_ERROR_EXPIRED_DETAILS_EXTRA_INFO_2)); } else { title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_NOT_YET_VALID_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_NOT_YET_VALID_DETAILS, UTF8ToUTF16(request_url.host()), UTF8ToUTF16(request_url.host()), base::TimeFormatFriendlyDateAndTime(base::Time::Now())); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_NOT_YET_VALID_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16( IDS_CERT_ERROR_NOT_YET_VALID_DETAILS_EXTRA_INFO_2)); } break; case CERT_AUTHORITY_INVALID: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_AUTHORITY_INVALID_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_AUTHORITY_INVALID_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16( IDS_CERT_ERROR_AUTHORITY_INVALID_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back(l10n_util::GetStringFUTF16( IDS_CERT_ERROR_AUTHORITY_INVALID_EXTRA_INFO_2, UTF8ToUTF16(request_url.host()), UTF8ToUTF16(request_url.host()))); extra_info.push_back(l10n_util::GetStringUTF16( IDS_CERT_ERROR_AUTHORITY_INVALID_EXTRA_INFO_3)); break; case CERT_CONTAINS_ERRORS: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_CONTAINS_ERRORS_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_CONTAINS_ERRORS_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_CONTAINS_ERRORS_DESCRIPTION); extra_info.push_back( l10n_util::GetStringFUTF16(IDS_CERT_ERROR_EXTRA_INFO_1, UTF8ToUTF16(request_url.host()))); extra_info.push_back(l10n_util::GetStringUTF16( IDS_CERT_ERROR_CONTAINS_ERRORS_EXTRA_INFO_2)); break; case CERT_NO_REVOCATION_MECHANISM: title = l10n_util::GetStringUTF16( IDS_CERT_ERROR_NO_REVOCATION_MECHANISM_TITLE); details = l10n_util::GetStringUTF16( IDS_CERT_ERROR_NO_REVOCATION_MECHANISM_DETAILS); short_description = 
l10n_util::GetStringUTF16( IDS_CERT_ERROR_NO_REVOCATION_MECHANISM_DESCRIPTION); break; case CERT_UNABLE_TO_CHECK_REVOCATION: title = l10n_util::GetStringUTF16( IDS_CERT_ERROR_UNABLE_TO_CHECK_REVOCATION_TITLE); details = l10n_util::GetStringUTF16( IDS_CERT_ERROR_UNABLE_TO_CHECK_REVOCATION_DETAILS); short_description = l10n_util::GetStringUTF16( IDS_CERT_ERROR_UNABLE_TO_CHECK_REVOCATION_DESCRIPTION); break; case CERT_REVOKED: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_REVOKED_CERT_TITLE); details = l10n_util::GetStringFUTF16(IDS_CERT_ERROR_REVOKED_CERT_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_REVOKED_CERT_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_REVOKED_CERT_EXTRA_INFO_2)); break; case CERT_INVALID: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_INVALID_CERT_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_INVALID_CERT_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_INVALID_CERT_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back(l10n_util::GetStringUTF16( IDS_CERT_ERROR_INVALID_CERT_EXTRA_INFO_2)); break; case CERT_WEAK_SIGNATURE_ALGORITHM: title = l10n_util::GetStringUTF16( IDS_CERT_ERROR_WEAK_SIGNATURE_ALGORITHM_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_WEAK_SIGNATURE_ALGORITHM_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16( IDS_CERT_ERROR_WEAK_SIGNATURE_ALGORITHM_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back( l10n_util::GetStringUTF16( IDS_CERT_ERROR_WEAK_SIGNATURE_ALGORITHM_EXTRA_INFO_2)); break; case CERT_WEAK_KEY: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_WEAK_KEY_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_WEAK_KEY_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16( IDS_CERT_ERROR_WEAK_KEY_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back( l10n_util::GetStringUTF16( IDS_CERT_ERROR_WEAK_KEY_EXTRA_INFO_2)); break; case UNKNOWN: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_UNKNOWN_ERROR_TITLE); details = l10n_util::GetStringUTF16(IDS_CERT_ERROR_UNKNOWN_ERROR_DETAILS); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_UNKNOWN_ERROR_DESCRIPTION); break; default: NOTREACHED(); } return SSLErrorInfo(title, details, short_description, extra_info); } Commit Message: Properly EscapeForHTML potentially malicious input from X.509 certificates. BUG=142956 TEST=Create an X.509 certificate with a CN field that contains JavaScript. When you get the SSL error screen, check that the HTML + JavaScript is escape instead of being treated as HTML and/or script. Review URL: https://chromiumcodereview.appspot.com/10827364 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@152210 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-79
SSLErrorInfo SSLErrorInfo::CreateError(ErrorType error_type, net::X509Certificate* cert, const GURL& request_url) { string16 title, details, short_description; std::vector<string16> extra_info; switch (error_type) { case CERT_COMMON_NAME_INVALID: { title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_COMMON_NAME_INVALID_TITLE); std::vector<std::string> dns_names; cert->GetDNSNames(&dns_names); DCHECK(!dns_names.empty()); size_t i = 0; for (; i < dns_names.size(); ++i) { if (dns_names[i] == cert->subject().common_name) break; } if (i == dns_names.size()) i = 0; details = l10n_util::GetStringFUTF16(IDS_CERT_ERROR_COMMON_NAME_INVALID_DETAILS, UTF8ToUTF16(request_url.host()), net::EscapeForHTML( UTF8ToUTF16(dns_names[i])), UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16( IDS_CERT_ERROR_COMMON_NAME_INVALID_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back( l10n_util::GetStringFUTF16( IDS_CERT_ERROR_COMMON_NAME_INVALID_EXTRA_INFO_2, net::EscapeForHTML(UTF8ToUTF16(cert->subject().common_name)), UTF8ToUTF16(request_url.host()))); break; } case CERT_DATE_INVALID: extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); if (cert->HasExpired()) { title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXPIRED_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_EXPIRED_DETAILS, UTF8ToUTF16(request_url.host()), UTF8ToUTF16(request_url.host()), base::TimeFormatFriendlyDateAndTime(base::Time::Now())); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXPIRED_DESCRIPTION); extra_info.push_back(l10n_util::GetStringUTF16( IDS_CERT_ERROR_EXPIRED_DETAILS_EXTRA_INFO_2)); } else { title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_NOT_YET_VALID_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_NOT_YET_VALID_DETAILS, UTF8ToUTF16(request_url.host()), UTF8ToUTF16(request_url.host()), base::TimeFormatFriendlyDateAndTime(base::Time::Now())); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_NOT_YET_VALID_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16( IDS_CERT_ERROR_NOT_YET_VALID_DETAILS_EXTRA_INFO_2)); } break; case CERT_AUTHORITY_INVALID: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_AUTHORITY_INVALID_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_AUTHORITY_INVALID_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16( IDS_CERT_ERROR_AUTHORITY_INVALID_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back(l10n_util::GetStringFUTF16( IDS_CERT_ERROR_AUTHORITY_INVALID_EXTRA_INFO_2, UTF8ToUTF16(request_url.host()), UTF8ToUTF16(request_url.host()))); extra_info.push_back(l10n_util::GetStringUTF16( IDS_CERT_ERROR_AUTHORITY_INVALID_EXTRA_INFO_3)); break; case CERT_CONTAINS_ERRORS: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_CONTAINS_ERRORS_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_CONTAINS_ERRORS_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_CONTAINS_ERRORS_DESCRIPTION); extra_info.push_back( l10n_util::GetStringFUTF16(IDS_CERT_ERROR_EXTRA_INFO_1, UTF8ToUTF16(request_url.host()))); extra_info.push_back(l10n_util::GetStringUTF16( IDS_CERT_ERROR_CONTAINS_ERRORS_EXTRA_INFO_2)); break; case CERT_NO_REVOCATION_MECHANISM: title = l10n_util::GetStringUTF16( IDS_CERT_ERROR_NO_REVOCATION_MECHANISM_TITLE); details = l10n_util::GetStringUTF16( 
IDS_CERT_ERROR_NO_REVOCATION_MECHANISM_DETAILS); short_description = l10n_util::GetStringUTF16( IDS_CERT_ERROR_NO_REVOCATION_MECHANISM_DESCRIPTION); break; case CERT_UNABLE_TO_CHECK_REVOCATION: title = l10n_util::GetStringUTF16( IDS_CERT_ERROR_UNABLE_TO_CHECK_REVOCATION_TITLE); details = l10n_util::GetStringUTF16( IDS_CERT_ERROR_UNABLE_TO_CHECK_REVOCATION_DETAILS); short_description = l10n_util::GetStringUTF16( IDS_CERT_ERROR_UNABLE_TO_CHECK_REVOCATION_DESCRIPTION); break; case CERT_REVOKED: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_REVOKED_CERT_TITLE); details = l10n_util::GetStringFUTF16(IDS_CERT_ERROR_REVOKED_CERT_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_REVOKED_CERT_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_REVOKED_CERT_EXTRA_INFO_2)); break; case CERT_INVALID: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_INVALID_CERT_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_INVALID_CERT_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_INVALID_CERT_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back(l10n_util::GetStringUTF16( IDS_CERT_ERROR_INVALID_CERT_EXTRA_INFO_2)); break; case CERT_WEAK_SIGNATURE_ALGORITHM: title = l10n_util::GetStringUTF16( IDS_CERT_ERROR_WEAK_SIGNATURE_ALGORITHM_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_WEAK_SIGNATURE_ALGORITHM_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16( IDS_CERT_ERROR_WEAK_SIGNATURE_ALGORITHM_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back( l10n_util::GetStringUTF16( IDS_CERT_ERROR_WEAK_SIGNATURE_ALGORITHM_EXTRA_INFO_2)); break; case CERT_WEAK_KEY: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_WEAK_KEY_TITLE); details = l10n_util::GetStringFUTF16( IDS_CERT_ERROR_WEAK_KEY_DETAILS, UTF8ToUTF16(request_url.host())); short_description = l10n_util::GetStringUTF16( IDS_CERT_ERROR_WEAK_KEY_DESCRIPTION); extra_info.push_back( l10n_util::GetStringUTF16(IDS_CERT_ERROR_EXTRA_INFO_1)); extra_info.push_back( l10n_util::GetStringUTF16( IDS_CERT_ERROR_WEAK_KEY_EXTRA_INFO_2)); break; case UNKNOWN: title = l10n_util::GetStringUTF16(IDS_CERT_ERROR_UNKNOWN_ERROR_TITLE); details = l10n_util::GetStringUTF16(IDS_CERT_ERROR_UNKNOWN_ERROR_DETAILS); short_description = l10n_util::GetStringUTF16(IDS_CERT_ERROR_UNKNOWN_ERROR_DESCRIPTION); break; default: NOTREACHED(); } return SSLErrorInfo(title, details, short_description, extra_info); }
170,904
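The substance of this CWE-79 fix is that certificate-controlled strings (the common name and DNS names) are passed through net::EscapeForHTML before being spliced into the interstitial markup. A minimal C routine showing the same escaping idea; it is not Chromium's implementation:

#include <stdio.h>

/* Treat the input as untrusted text, never as markup. */
static void escape_for_html(const char *in, FILE *out)
{
    for (; *in; in++) {
        switch (*in) {
        case '&':  fputs("&amp;", out);  break;
        case '<':  fputs("&lt;", out);   break;
        case '>':  fputs("&gt;", out);   break;
        case '"':  fputs("&quot;", out); break;
        case '\'': fputs("&#39;", out);  break;
        default:   fputc(*in, out);      break;
        }
    }
}

int main(void)
{
    escape_for_html("<script>alert(1)</script>.example.com", stdout);
    putchar('\n');
    return 0;
}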
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation.
Code: void BrowserEventRouter::ExtensionActionExecuted( Profile* profile, const ExtensionAction& extension_action, WebContents* web_contents) { const char* event_name = NULL; switch (extension_action.action_type()) { case Extension::ActionInfo::TYPE_BROWSER: event_name = "browserAction.onClicked"; break; case Extension::ActionInfo::TYPE_PAGE: event_name = "pageAction.onClicked"; break; case Extension::ActionInfo::TYPE_SCRIPT_BADGE: event_name = "scriptBadge.onClicked"; break; case Extension::ActionInfo::TYPE_SYSTEM_INDICATOR: break; } if (event_name) { scoped_ptr<ListValue> args(new ListValue()); DictionaryValue* tab_value = ExtensionTabUtil::CreateTabValue( web_contents, ExtensionTabUtil::INCLUDE_PRIVACY_SENSITIVE_FIELDS); args->Append(tab_value); DispatchEventToExtension(profile, extension_action.extension_id(), event_name, args.Pass(), EventRouter::USER_GESTURE_ENABLED); } } Commit Message: Do not pass URLs in onUpdated events to extensions unless they have the "tabs" permission. BUG=168442 Review URL: https://chromiumcodereview.appspot.com/11824004 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@176406 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-264
void BrowserEventRouter::ExtensionActionExecuted( Profile* profile, const ExtensionAction& extension_action, WebContents* web_contents) { const char* event_name = NULL; switch (extension_action.action_type()) { case Extension::ActionInfo::TYPE_BROWSER: event_name = "browserAction.onClicked"; break; case Extension::ActionInfo::TYPE_PAGE: event_name = "pageAction.onClicked"; break; case Extension::ActionInfo::TYPE_SCRIPT_BADGE: event_name = "scriptBadge.onClicked"; break; case Extension::ActionInfo::TYPE_SYSTEM_INDICATOR: break; } if (event_name) { scoped_ptr<ListValue> args(new ListValue()); DictionaryValue* tab_value = ExtensionTabUtil::CreateTabValue( web_contents); args->Append(tab_value); DispatchEventToExtension(profile, extension_action.extension_id(), event_name, args.Pass(), EventRouter::USER_GESTURE_ENABLED); } }
171,450
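The fix drops the INCLUDE_PRIVACY_SENSITIVE_FIELDS argument so tab URLs are no longer handed to extensions that lack the "tabs" permission; the actual check happens inside Chromium and is not shown here. A hedged sketch of the gating idea with hypothetical structures:

#include <stdio.h>

struct tab_info { int id; const char *url; const char *title; };

/* Serialize a tab for an extension, including the privacy-sensitive
 * fields only when the caller holds the required permission. */
static void serialize_tab(const struct tab_info *t, int has_tabs_permission,
                          FILE *out)
{
    fprintf(out, "{ \"id\": %d", t->id);
    if (has_tabs_permission)            /* URL and title are sensitive */
        fprintf(out, ", \"url\": \"%s\", \"title\": \"%s\"", t->url, t->title);
    fputs(" }\n", out);
}

int main(void)
{
    struct tab_info t = { 7, "https://example.com/", "Example" };
    serialize_tab(&t, 0, stdout);   /* no "tabs" permission: fields omitted */
    serialize_tab(&t, 1, stdout);   /* with permission: fields included */
    return 0;
}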
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code only, no explanation.
Code: compare_read(struct display *dp, int applied_transforms) { /* Compare the png_info from read_ip with original_info */ size_t rowbytes; png_uint_32 width, height; int bit_depth, color_type; int interlace_method, compression_method, filter_method; const char *e = NULL; png_get_IHDR(dp->read_pp, dp->read_ip, &width, &height, &bit_depth, &color_type, &interlace_method, &compression_method, &filter_method); # define C(item) if (item != dp->item) \ display_log(dp, APP_WARNING, "IHDR " #item "(%lu) changed to %lu",\ (unsigned long)dp->item, (unsigned long)item), e = #item /* The IHDR should be identical: */ C(width); C(height); C(bit_depth); C(color_type); C(interlace_method); C(compression_method); C(filter_method); /* 'e' remains set to the name of the last thing changed: */ if (e) display_log(dp, APP_ERROR, "IHDR changed (%s)", e); /* All the chunks from the original PNG should be preserved in the output PNG * because the PNG format has not been changed. */ { unsigned long chunks = png_get_valid(dp->read_pp, dp->read_ip, 0xffffffff); if (chunks != dp->chunks) display_log(dp, APP_FAIL, "PNG chunks changed from 0x%lx to 0x%lx", (unsigned long)dp->chunks, chunks); } /* rowbytes should be the same */ rowbytes = png_get_rowbytes(dp->read_pp, dp->read_ip); /* NOTE: on 64-bit systems this may trash the top bits of rowbytes, * which could lead to weird error messages. */ if (rowbytes != dp->original_rowbytes) display_log(dp, APP_ERROR, "PNG rowbytes changed from %lu to %lu", (unsigned long)dp->original_rowbytes, (unsigned long)rowbytes); /* The rows should be the same too, unless the applied transforms includes * the shift transform, in which case low bits may have been lost. */ { png_bytepp rows = png_get_rows(dp->read_pp, dp->read_ip); unsigned int mask; /* mask (if not zero) for the final byte */ if (bit_depth < 8) { /* Need the stray bits at the end, this depends only on the low bits * of the image width; overflow does not matter. If the width is an * exact multiple of 8 bits this gives a mask of 0, not 0xff. 
*/ mask = 0xff & (0xff00 >> ((bit_depth * width) & 7)); } else mask = 0; if (rows == NULL) display_log(dp, LIBPNG_BUG, "png_get_rows returned NULL"); if ((applied_transforms & PNG_TRANSFORM_SHIFT) == 0 || (dp->active_transforms & PNG_TRANSFORM_SHIFT) == 0 || color_type == PNG_COLOR_TYPE_PALETTE) { unsigned long y; for (y=0; y<height; ++y) { png_bytep row = rows[y]; png_bytep orig = dp->original_rows[y]; if (memcmp(row, orig, rowbytes-(mask != 0)) != 0 || (mask != 0 && ((row[rowbytes-1] & mask) != (orig[rowbytes-1] & mask)))) { size_t x; /* Find the first error */ for (x=0; x<rowbytes-1; ++x) if (row[x] != orig[x]) break; display_log(dp, APP_FAIL, "byte(%lu,%lu) changed 0x%.2x -> 0x%.2x", (unsigned long)x, (unsigned long)y, orig[x], row[x]); return 0; /* don't keep reporting failed rows on 'continue' */ } } } else { unsigned long y; int bpp; /* bits-per-pixel then bytes-per-pixel */ /* components are up to 8 bytes in size */ png_byte sig_bits[8]; png_color_8p sBIT; if (png_get_sBIT(dp->read_pp, dp->read_ip, &sBIT) != PNG_INFO_sBIT) display_log(dp, INTERNAL_ERROR, "active shift transform but no sBIT in file"); switch (color_type) { case PNG_COLOR_TYPE_GRAY: sig_bits[0] = sBIT->gray; bpp = bit_depth; break; case PNG_COLOR_TYPE_GA: sig_bits[0] = sBIT->gray; sig_bits[1] = sBIT->alpha; bpp = 2 * bit_depth; break; case PNG_COLOR_TYPE_RGB: sig_bits[0] = sBIT->red; sig_bits[1] = sBIT->green; sig_bits[2] = sBIT->blue; bpp = 3 * bit_depth; break; case PNG_COLOR_TYPE_RGBA: sig_bits[0] = sBIT->red; sig_bits[1] = sBIT->green; sig_bits[2] = sBIT->blue; sig_bits[3] = sBIT->alpha; bpp = 4 * bit_depth; break; default: display_log(dp, LIBPNG_ERROR, "invalid colour type %d", color_type); /*NOTREACHED*/ bpp = 0; break; } { int b; for (b=0; 8*b<bpp; ++b) { /* libpng should catch this; if not there is a security issue * because an app (like this one) may overflow an array. In fact * libpng doesn't catch this at present. */ if (sig_bits[b] == 0 || sig_bits[b] > bit_depth/*!palette*/) display_log(dp, LIBPNG_BUG, "invalid sBIT[%u] value %d returned for PNG bit depth %d", b, sig_bits[b], bit_depth); } } if (bpp < 8 && bpp != bit_depth) { /* sanity check; this is a grayscale PNG; something is wrong in the * code above. */ display_log(dp, INTERNAL_ERROR, "invalid bpp %u for bit_depth %u", bpp, bit_depth); } switch (bit_depth) { int b; case 16: /* Two bytes per component, bit-endian */ for (b = (bpp >> 4); b > 0; ) { unsigned int sig = (unsigned int)(0xffff0000 >> sig_bits[b]); sig_bits[2*b+1] = (png_byte)sig; sig_bits[2*b+0] = (png_byte)(sig >> 8); /* big-endian */ } break; case 8: /* One byte per component */ for (b=0; b*8 < bpp; ++b) sig_bits[b] = (png_byte)(0xff00 >> sig_bits[b]); break; case 1: /* allowed, but dumb */ /* Value is 1 */ sig_bits[0] = 0xff; break; case 2: /* Replicate 4 times */ /* Value is 1 or 2 */ b = 0x3 & ((0x3<<2) >> sig_bits[0]); b |= b << 2; b |= b << 4; sig_bits[0] = (png_byte)b; break; case 4: /* Relicate twice */ /* Value is 1, 2, 3 or 4 */ b = 0xf & ((0xf << 4) >> sig_bits[0]); b |= b << 4; sig_bits[0] = (png_byte)b; break; default: display_log(dp, LIBPNG_BUG, "invalid bit depth %d", bit_depth); break; } /* Convert bpp to bytes; this gives '1' for low-bit depth grayscale, * where there are multiple pixels per byte. 
*/ bpp = (bpp+7) >> 3; /* The mask can be combined with sig_bits[0] */ if (mask != 0) { mask &= sig_bits[0]; if (bpp != 1 || mask == 0) display_log(dp, INTERNAL_ERROR, "mask calculation error %u, %u", bpp, mask); } for (y=0; y<height; ++y) { png_bytep row = rows[y]; png_bytep orig = dp->original_rows[y]; unsigned long x; for (x=0; x<(width-(mask!=0)); ++x) { int b; for (b=0; b<bpp; ++b) { if ((*row++ & sig_bits[b]) != (*orig++ & sig_bits[b])) { display_log(dp, APP_FAIL, "significant bits at (%lu[%u],%lu) changed %.2x->%.2x", x, b, y, orig[-1], row[-1]); return 0; } } } if (mask != 0 && (*row & mask) != (*orig & mask)) { display_log(dp, APP_FAIL, "significant bits at (%lu[end],%lu) changed", x, y); return 0; } } /* for y */ } } return 1; /* compare succeeded */ } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
compare_read(struct display *dp, int applied_transforms) { /* Compare the png_info from read_ip with original_info */ size_t rowbytes; png_uint_32 width, height; int bit_depth, color_type; int interlace_method, compression_method, filter_method; const char *e = NULL; png_get_IHDR(dp->read_pp, dp->read_ip, &width, &height, &bit_depth, &color_type, &interlace_method, &compression_method, &filter_method); # define C(item) if (item != dp->item) \ display_log(dp, APP_WARNING, "IHDR " #item "(%lu) changed to %lu",\ (unsigned long)dp->item, (unsigned long)item), e = #item /* The IHDR should be identical: */ C(width); C(height); C(bit_depth); C(color_type); C(interlace_method); C(compression_method); C(filter_method); /* 'e' remains set to the name of the last thing changed: */ if (e) display_log(dp, APP_ERROR, "IHDR changed (%s)", e); /* All the chunks from the original PNG should be preserved in the output PNG * because the PNG format has not been changed. */ { unsigned long chunks = png_get_valid(dp->read_pp, dp->read_ip, 0xffffffff); if (chunks != dp->chunks) display_log(dp, APP_FAIL, "PNG chunks changed from 0x%lx to 0x%lx", (unsigned long)dp->chunks, chunks); } /* rowbytes should be the same */ rowbytes = png_get_rowbytes(dp->read_pp, dp->read_ip); /* NOTE: on 64-bit systems this may trash the top bits of rowbytes, * which could lead to weird error messages. */ if (rowbytes != dp->original_rowbytes) display_log(dp, APP_ERROR, "PNG rowbytes changed from %lu to %lu", (unsigned long)dp->original_rowbytes, (unsigned long)rowbytes); /* The rows should be the same too, unless the applied transforms includes * the shift transform, in which case low bits may have been lost. */ { png_bytepp rows = png_get_rows(dp->read_pp, dp->read_ip); unsigned int mask; /* mask (if not zero) for the final byte */ if (bit_depth < 8) { /* Need the stray bits at the end, this depends only on the low bits * of the image width; overflow does not matter. If the width is an * exact multiple of 8 bits this gives a mask of 0, not 0xff. 
*/ mask = 0xff & (0xff00 >> ((bit_depth * width) & 7)); } else mask = 0; if (rows == NULL) display_log(dp, LIBPNG_BUG, "png_get_rows returned NULL"); if ((applied_transforms & PNG_TRANSFORM_SHIFT) == 0 || (dp->active_transforms & PNG_TRANSFORM_SHIFT) == 0 || color_type == PNG_COLOR_TYPE_PALETTE) { unsigned long y; for (y=0; y<height; ++y) { png_bytep row = rows[y]; png_bytep orig = dp->original_rows[y]; if (memcmp(row, orig, rowbytes-(mask != 0)) != 0 || (mask != 0 && ((row[rowbytes-1] & mask) != (orig[rowbytes-1] & mask)))) { size_t x; /* Find the first error */ for (x=0; x<rowbytes-1; ++x) if (row[x] != orig[x]) break; display_log(dp, APP_FAIL, "byte(%lu,%lu) changed 0x%.2x -> 0x%.2x", (unsigned long)x, (unsigned long)y, orig[x], row[x]); return 0; /* don't keep reporting failed rows on 'continue' */ } } } else { unsigned long y; int bpp; /* bits-per-pixel then bytes-per-pixel */ /* components are up to 8 bytes in size */ png_byte sig_bits[8]; png_color_8p sBIT; if (png_get_sBIT(dp->read_pp, dp->read_ip, &sBIT) != PNG_INFO_sBIT) display_log(dp, INTERNAL_ERROR, "active shift transform but no sBIT in file"); switch (color_type) { case PNG_COLOR_TYPE_GRAY: sig_bits[0] = sBIT->gray; bpp = bit_depth; break; case PNG_COLOR_TYPE_GA: sig_bits[0] = sBIT->gray; sig_bits[1] = sBIT->alpha; bpp = 2 * bit_depth; break; case PNG_COLOR_TYPE_RGB: sig_bits[0] = sBIT->red; sig_bits[1] = sBIT->green; sig_bits[2] = sBIT->blue; bpp = 3 * bit_depth; break; case PNG_COLOR_TYPE_RGBA: sig_bits[0] = sBIT->red; sig_bits[1] = sBIT->green; sig_bits[2] = sBIT->blue; sig_bits[3] = sBIT->alpha; bpp = 4 * bit_depth; break; default: display_log(dp, LIBPNG_ERROR, "invalid colour type %d", color_type); /*NOTREACHED*/ bpp = 0; break; } { int b; for (b=0; 8*b<bpp; ++b) { /* libpng should catch this; if not there is a security issue * because an app (like this one) may overflow an array. In fact * libpng doesn't catch this at present. */ if (sig_bits[b] == 0 || sig_bits[b] > bit_depth/*!palette*/) display_log(dp, LIBPNG_BUG, "invalid sBIT[%u] value %d returned for PNG bit depth %d", b, sig_bits[b], bit_depth); } } if (bpp < 8 && bpp != bit_depth) { /* sanity check; this is a grayscale PNG; something is wrong in the * code above. */ display_log(dp, INTERNAL_ERROR, "invalid bpp %u for bit_depth %u", bpp, bit_depth); } switch (bit_depth) { int b; case 16: /* Two bytes per component, big-endian */ for (b = (bpp >> 4); b > 0; --b) { unsigned int sig = (unsigned int)(0xffff0000 >> sig_bits[b]); sig_bits[2*b+1] = (png_byte)sig; sig_bits[2*b+0] = (png_byte)(sig >> 8); /* big-endian */ } break; case 8: /* One byte per component */ for (b=0; b*8 < bpp; ++b) sig_bits[b] = (png_byte)(0xff00 >> sig_bits[b]); break; case 1: /* allowed, but dumb */ /* Value is 1 */ sig_bits[0] = 0xff; break; case 2: /* Replicate 4 times */ /* Value is 1 or 2 */ b = 0x3 & ((0x3<<2) >> sig_bits[0]); b |= b << 2; b |= b << 4; sig_bits[0] = (png_byte)b; break; case 4: /* Relicate twice */ /* Value is 1, 2, 3 or 4 */ b = 0xf & ((0xf << 4) >> sig_bits[0]); b |= b << 4; sig_bits[0] = (png_byte)b; break; default: display_log(dp, LIBPNG_BUG, "invalid bit depth %d", bit_depth); break; } /* Convert bpp to bytes; this gives '1' for low-bit depth grayscale, * where there are multiple pixels per byte. 
*/ bpp = (bpp+7) >> 3; /* The mask can be combined with sig_bits[0] */ if (mask != 0) { mask &= sig_bits[0]; if (bpp != 1 || mask == 0) display_log(dp, INTERNAL_ERROR, "mask calculation error %u, %u", bpp, mask); } for (y=0; y<height; ++y) { png_bytep row = rows[y]; png_bytep orig = dp->original_rows[y]; unsigned long x; for (x=0; x<(width-(mask!=0)); ++x) { int b; for (b=0; b<bpp; ++b) { if ((*row++ & sig_bits[b]) != (*orig++ & sig_bits[b])) { display_log(dp, APP_FAIL, "significant bits at (%lu[%u],%lu) changed %.2x->%.2x", x, b, y, orig[-1], row[-1]); return 0; } } } if (mask != 0 && (*row & mask) != (*orig & mask)) { display_log(dp, APP_FAIL, "significant bits at (%lu[end],%lu) changed", x, y); return 0; } } /* for y */ } } return 1; /* compare succeeded */ }
173,587
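A side note on the libpng record above: the vulnerable loop trusts the sBIT values read from the file before using them as shift counts, which is why its comment warns that an application could overflow an array. A minimal C++ sketch of validating such a field first; the helper name safe_sig_mask is invented and not from libpng:

#include <cstdint>
#include <optional>

// Build the 8-bit mask that keeps only the significant bits of a sample.
// Out-of-range file data (0, or more bits than the PNG bit depth) is rejected
// instead of being fed into a shift, mirroring the sanity check in the record.
std::optional<uint8_t> safe_sig_mask(unsigned sig_bits, unsigned bit_depth) {
    if (bit_depth == 0 || bit_depth > 8 || sig_bits == 0 || sig_bits > bit_depth)
        return std::nullopt;
    return static_cast<uint8_t>(0xff00u >> sig_bits);   // e.g. sig_bits=5 -> 0xf8
}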
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, v4l2_kioctl func) { char sbuf[128]; void *mbuf = NULL; void *parg = NULL; long err = -EINVAL; int is_ext_ctrl; size_t ctrls_size = 0; void __user *user_ptr = NULL; is_ext_ctrl = (cmd == VIDIOC_S_EXT_CTRLS || cmd == VIDIOC_G_EXT_CTRLS || cmd == VIDIOC_TRY_EXT_CTRLS); /* Copy arguments into temp kernel buffer */ switch (_IOC_DIR(cmd)) { case _IOC_NONE: parg = NULL; break; case _IOC_READ: case _IOC_WRITE: case (_IOC_WRITE | _IOC_READ): if (_IOC_SIZE(cmd) <= sizeof(sbuf)) { parg = sbuf; } else { /* too big to allocate from stack */ mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); if (NULL == mbuf) return -ENOMEM; parg = mbuf; } err = -EFAULT; if (_IOC_DIR(cmd) & _IOC_WRITE) if (copy_from_user(parg, (void __user *)arg, _IOC_SIZE(cmd))) goto out; break; } if (is_ext_ctrl) { struct v4l2_ext_controls *p = parg; /* In case of an error, tell the caller that it wasn't a specific control that caused it. */ p->error_idx = p->count; user_ptr = (void __user *)p->controls; if (p->count) { ctrls_size = sizeof(struct v4l2_ext_control) * p->count; /* Note: v4l2_ext_controls fits in sbuf[] so mbuf is still NULL. */ mbuf = kmalloc(ctrls_size, GFP_KERNEL); err = -ENOMEM; if (NULL == mbuf) goto out_ext_ctrl; err = -EFAULT; if (copy_from_user(mbuf, user_ptr, ctrls_size)) goto out_ext_ctrl; p->controls = mbuf; } } /* call driver */ err = func(file, cmd, parg); if (err == -ENOIOCTLCMD) err = -EINVAL; if (is_ext_ctrl) { struct v4l2_ext_controls *p = parg; p->controls = (void *)user_ptr; if (p->count && err == 0 && copy_to_user(user_ptr, mbuf, ctrls_size)) err = -EFAULT; goto out_ext_ctrl; } if (err < 0) goto out; out_ext_ctrl: /* Copy results into user buffer */ switch (_IOC_DIR(cmd)) { case _IOC_READ: case (_IOC_WRITE | _IOC_READ): if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd))) err = -EFAULT; break; } out: kfree(mbuf); return err; } Commit Message: [media] v4l: Share code between video_usercopy and video_ioctl2 The two functions are mostly identical. They handle the copy_from_user and copy_to_user operations related with V4L2 ioctls and call the real ioctl handler. Create a __video_usercopy function that implements the core of video_usercopy and video_ioctl2, and call that function from both. Signed-off-by: Laurent Pinchart <[email protected]> Acked-by: Hans Verkuil <[email protected]> Signed-off-by: Mauro Carvalho Chehab <[email protected]> CWE ID: CWE-399
video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
168,916
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void CoordinatorImpl::RegisterClientProcess( mojom::ClientProcessPtr client_process_ptr, mojom::ProcessType process_type) { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); mojom::ClientProcess* client_process = client_process_ptr.get(); client_process_ptr.set_connection_error_handler( base::BindOnce(&CoordinatorImpl::UnregisterClientProcess, base::Unretained(this), client_process)); auto identity = GetClientIdentityForCurrentRequest(); auto client_info = std::make_unique<ClientInfo>( std::move(identity), std::move(client_process_ptr), process_type); auto iterator_and_inserted = clients_.emplace(client_process, std::move(client_info)); DCHECK(iterator_and_inserted.second); } Commit Message: Fix heap-use-after-free by using weak factory instead of Unretained Bug: 856578 Change-Id: Ifb2a1b7e6c22e1af36e12eedba72427f51d925b9 Reviewed-on: https://chromium-review.googlesource.com/1114617 Reviewed-by: Hector Dearman <[email protected]> Commit-Queue: Hector Dearman <[email protected]> Cr-Commit-Position: refs/heads/master@{#571528} CWE ID: CWE-416
void CoordinatorImpl::RegisterClientProcess( mojom::ClientProcessPtr client_process_ptr, mojom::ProcessType process_type) { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); mojom::ClientProcess* client_process = client_process_ptr.get(); client_process_ptr.set_connection_error_handler( base::BindOnce(&CoordinatorImpl::UnregisterClientProcess, weak_ptr_factory_.GetWeakPtr(), client_process)); auto identity = GetClientIdentityForCurrentRequest(); auto client_info = std::make_unique<ClientInfo>( std::move(identity), std::move(client_process_ptr), process_type); auto iterator_and_inserted = clients_.emplace(client_process, std::move(client_info)); DCHECK(iterator_and_inserted.second); }
173,215
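The Chromium record above swaps base::Unretained for a weak pointer so that a connection-error callback queued against a destroyed object is dropped instead of dereferencing freed memory. A minimal sketch of that pattern, assuming Chromium's base::WeakPtrFactory and BindOnce API; the Coordinator class, method names, and include paths here are illustrative and vary by revision:

#include "base/bind.h"             // base::BindOnce (path differs across revisions)
#include "base/callback.h"         // base::OnceClosure
#include "base/memory/weak_ptr.h"  // base::WeakPtrFactory

class Coordinator {
 public:
  base::OnceClosure MakeDisconnectHandler(int client_id) {
    // GetWeakPtr() instead of base::Unretained(this): if |this| is gone by the
    // time the error handler fires, the bound call becomes a no-op rather than
    // a use-after-free (the CWE-416 fixed in the record above).
    return base::BindOnce(&Coordinator::UnregisterClientProcess,
                          weak_ptr_factory_.GetWeakPtr(), client_id);
  }

 private:
  void UnregisterClientProcess(int client_id) { /* drop bookkeeping for id */ }

  // Conventionally the last member, so weak pointers are invalidated before
  // the rest of the object is torn down.
  base::WeakPtrFactory<Coordinator> weak_ptr_factory_{this};
};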
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: long ssl3_ctx_ctrl(SSL_CTX *ctx, int cmd, long larg, void *parg) { CERT *cert; cert = ctx->cert; switch (cmd) { #ifndef OPENSSL_NO_RSA case SSL_CTRL_NEED_TMP_RSA: if ((cert->rsa_tmp == NULL) && ((cert->pkeys[SSL_PKEY_RSA_ENC].privatekey == NULL) || (EVP_PKEY_size(cert->pkeys[SSL_PKEY_RSA_ENC].privatekey) > (512 / 8))) ) return (1); else return (0); /* break; */ case SSL_CTRL_SET_TMP_RSA: { RSA *rsa; int i; rsa = (RSA *)parg; i = 1; if (rsa == NULL) i = 0; else { if ((rsa = RSAPrivateKey_dup(rsa)) == NULL) i = 0; } if (!i) { SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_RSA_LIB); return (0); } else { if (cert->rsa_tmp != NULL) RSA_free(cert->rsa_tmp); cert->rsa_tmp = rsa; return (1); } } /* break; */ case SSL_CTRL_SET_TMP_RSA_CB: { SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return (0); } break; #endif SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_DH_LIB); return 0; } if (!(ctx->options & SSL_OP_SINGLE_DH_USE)) { if (!DH_generate_key(new)) { SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_DH_LIB); DH_free(new); return 0; } } if (cert->dh_tmp != NULL) DH_free(cert->dh_tmp); cert->dh_tmp = new; if ((new = DHparams_dup(dh)) == NULL) { SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_DH_LIB); return 0; } if (!(ctx->options & SSL_OP_SINGLE_DH_USE)) { if (!DH_generate_key(new)) { SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_DH_LIB); DH_free(new); return 0; } } if (cert->dh_tmp != NULL) DH_free(cert->dh_tmp); cert->dh_tmp = new; return 1; } Commit Message: CWE ID: CWE-200
long ssl3_ctx_ctrl(SSL_CTX *ctx, int cmd, long larg, void *parg) { CERT *cert; cert = ctx->cert; switch (cmd) { #ifndef OPENSSL_NO_RSA case SSL_CTRL_NEED_TMP_RSA: if ((cert->rsa_tmp == NULL) && ((cert->pkeys[SSL_PKEY_RSA_ENC].privatekey == NULL) || (EVP_PKEY_size(cert->pkeys[SSL_PKEY_RSA_ENC].privatekey) > (512 / 8))) ) return (1); else return (0); /* break; */ case SSL_CTRL_SET_TMP_RSA: { RSA *rsa; int i; rsa = (RSA *)parg; i = 1; if (rsa == NULL) i = 0; else { if ((rsa = RSAPrivateKey_dup(rsa)) == NULL) i = 0; } if (!i) { SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_RSA_LIB); return (0); } else { if (cert->rsa_tmp != NULL) RSA_free(cert->rsa_tmp); cert->rsa_tmp = rsa; return (1); } } /* break; */ case SSL_CTRL_SET_TMP_RSA_CB: { SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return (0); } break; #endif SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_DH_LIB); return 0; } if (cert->dh_tmp != NULL) DH_free(cert->dh_tmp); cert->dh_tmp = new; if ((new = DHparams_dup(dh)) == NULL) { SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_DH_LIB); return 0; } if (!(ctx->options & SSL_OP_SINGLE_DH_USE)) { if (!DH_generate_key(new)) { SSLerr(SSL_F_SSL3_CTX_CTRL, ERR_R_DH_LIB); DH_free(new); return 0; } } if (cert->dh_tmp != NULL) DH_free(cert->dh_tmp); cert->dh_tmp = new; return 1; }
165,256
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) { struct super_block *sb = inode->i_sb; handle_t *handle; struct ext4_ext_path *path; struct ext4_extent *extent; ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0; unsigned int credits, ee_len; int ret = 0, depth, split_flag = 0; loff_t ioffset; /* * We need to test this early because xfstests assumes that an * insert range of (0, 1) will return EOPNOTSUPP if the file * system does not support insert range. */ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) return -EOPNOTSUPP; /* Insert range works only on fs block size aligned offsets. */ if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) || len & (EXT4_CLUSTER_SIZE(sb) - 1)) return -EINVAL; if (!S_ISREG(inode->i_mode)) return -EOPNOTSUPP; trace_ext4_insert_range(inode, offset, len); offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb); len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb); /* Call ext4_force_commit to flush all data in case of data=journal */ if (ext4_should_journal_data(inode)) { ret = ext4_force_commit(inode->i_sb); if (ret) return ret; } /* * Need to round down to align start offset to page size boundary * for page size > block size. */ ioffset = round_down(offset, PAGE_SIZE); /* Write out all dirty pages */ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, LLONG_MAX); if (ret) return ret; /* Take mutex lock */ mutex_lock(&inode->i_mutex); /* Currently just for extent based files */ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { ret = -EOPNOTSUPP; goto out_mutex; } /* Check for wrap through zero */ if (inode->i_size + len > inode->i_sb->s_maxbytes) { ret = -EFBIG; goto out_mutex; } /* Offset should be less than i_size */ if (offset >= i_size_read(inode)) { ret = -EINVAL; goto out_mutex; } truncate_pagecache(inode, ioffset); /* Wait for existing dio to complete */ ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); credits = ext4_writepage_trans_blocks(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out_dio; } /* Expand file to avoid data loss if there is error while shifting */ inode->i_size += len; EXT4_I(inode)->i_disksize += len; inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ret = ext4_mark_inode_dirty(handle, inode); if (ret) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); path = ext4_find_extent(inode, offset_lblk, NULL, 0); if (IS_ERR(path)) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } depth = ext_depth(inode); extent = path[depth].p_ext; if (extent) { ee_start_lblk = le32_to_cpu(extent->ee_block); ee_len = ext4_ext_get_actual_len(extent); /* * If offset_lblk is not the starting block of extent, split * the extent @offset_lblk */ if ((offset_lblk > ee_start_lblk) && (offset_lblk < (ee_start_lblk + ee_len))) { if (ext4_ext_is_unwritten(extent)) split_flag = EXT4_EXT_MARK_UNWRIT1 | EXT4_EXT_MARK_UNWRIT2; ret = ext4_split_extent_at(handle, inode, &path, offset_lblk, split_flag, EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO | EXT4_GET_BLOCKS_METADATA_NOFAIL); } ext4_ext_drop_refs(path); kfree(path); if (ret < 0) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } } ret = ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk); if (ret) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } /* * if offset_lblk lies in a hole which is at start of file, use * ee_start_lblk to shift extents */ ret = ext4_ext_shift_extents(inode, handle, ee_start_lblk > offset_lblk ? 
ee_start_lblk : offset_lblk, len_lblk, SHIFT_RIGHT); up_write(&EXT4_I(inode)->i_data_sem); if (IS_SYNC(inode)) ext4_handle_sync(handle); out_stop: ext4_journal_stop(handle); out_dio: ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); return ret; } Commit Message: ext4: fix races between page faults and hole punching Currently, page faults and hole punching are completely unsynchronized. This can result in page fault faulting in a page into a range that we are punching after truncate_pagecache_range() has been called and thus we can end up with a page mapped to disk blocks that will be shortly freed. Filesystem corruption will shortly follow. Note that the same race is avoided for truncate by checking page fault offset against i_size but there isn't similar mechanism available for punching holes. Fix the problem by creating new rw semaphore i_mmap_sem in inode and grab it for writing over truncate, hole punching, and other functions removing blocks from extent tree and for read over page faults. We cannot easily use i_data_sem for this since that ranks below transaction start and we need something ranking above it so that it can be held over the whole truncate / hole punching operation. Also remove various workarounds we had in the code to reduce race window when page fault could have created pages with stale mapping information. Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]> CWE ID: CWE-362
int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) { struct super_block *sb = inode->i_sb; handle_t *handle; struct ext4_ext_path *path; struct ext4_extent *extent; ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0; unsigned int credits, ee_len; int ret = 0, depth, split_flag = 0; loff_t ioffset; /* * We need to test this early because xfstests assumes that an * insert range of (0, 1) will return EOPNOTSUPP if the file * system does not support insert range. */ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) return -EOPNOTSUPP; /* Insert range works only on fs block size aligned offsets. */ if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) || len & (EXT4_CLUSTER_SIZE(sb) - 1)) return -EINVAL; if (!S_ISREG(inode->i_mode)) return -EOPNOTSUPP; trace_ext4_insert_range(inode, offset, len); offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb); len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb); /* Call ext4_force_commit to flush all data in case of data=journal */ if (ext4_should_journal_data(inode)) { ret = ext4_force_commit(inode->i_sb); if (ret) return ret; } /* * Need to round down to align start offset to page size boundary * for page size > block size. */ ioffset = round_down(offset, PAGE_SIZE); /* Write out all dirty pages */ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, LLONG_MAX); if (ret) return ret; /* Take mutex lock */ mutex_lock(&inode->i_mutex); /* Currently just for extent based files */ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { ret = -EOPNOTSUPP; goto out_mutex; } /* Check for wrap through zero */ if (inode->i_size + len > inode->i_sb->s_maxbytes) { ret = -EFBIG; goto out_mutex; } /* Offset should be less than i_size */ if (offset >= i_size_read(inode)) { ret = -EINVAL; goto out_mutex; } /* Wait for existing dio to complete */ ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); /* * Prevent page faults from reinstantiating pages we have released from * page cache. 
*/ down_write(&EXT4_I(inode)->i_mmap_sem); truncate_pagecache(inode, ioffset); credits = ext4_writepage_trans_blocks(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out_mmap; } /* Expand file to avoid data loss if there is error while shifting */ inode->i_size += len; EXT4_I(inode)->i_disksize += len; inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ret = ext4_mark_inode_dirty(handle, inode); if (ret) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); path = ext4_find_extent(inode, offset_lblk, NULL, 0); if (IS_ERR(path)) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } depth = ext_depth(inode); extent = path[depth].p_ext; if (extent) { ee_start_lblk = le32_to_cpu(extent->ee_block); ee_len = ext4_ext_get_actual_len(extent); /* * If offset_lblk is not the starting block of extent, split * the extent @offset_lblk */ if ((offset_lblk > ee_start_lblk) && (offset_lblk < (ee_start_lblk + ee_len))) { if (ext4_ext_is_unwritten(extent)) split_flag = EXT4_EXT_MARK_UNWRIT1 | EXT4_EXT_MARK_UNWRIT2; ret = ext4_split_extent_at(handle, inode, &path, offset_lblk, split_flag, EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO | EXT4_GET_BLOCKS_METADATA_NOFAIL); } ext4_ext_drop_refs(path); kfree(path); if (ret < 0) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } } ret = ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk); if (ret) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } /* * if offset_lblk lies in a hole which is at start of file, use * ee_start_lblk to shift extents */ ret = ext4_ext_shift_extents(inode, handle, ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk, len_lblk, SHIFT_RIGHT); up_write(&EXT4_I(inode)->i_data_sem); if (IS_SYNC(inode)) ext4_handle_sync(handle); out_stop: ext4_journal_stop(handle); out_mmap: up_write(&EXT4_I(inode)->i_mmap_sem); ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); return ret; }
167,484
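The ext4 record above closes the race by adding an inode rw-semaphore (i_mmap_sem) that hole punching takes for writing and the page-fault path takes for reading. The same shape in user-space C++, with std::shared_mutex standing in for the kernel primitive; FileState, punch_hole and handle_page_fault are invented names, not kernel API:

#include <mutex>
#include <shared_mutex>

struct FileState {
    std::shared_mutex mmap_sem;   // plays the role of EXT4_I(inode)->i_mmap_sem
};

void punch_hole(FileState& f) {
    std::unique_lock<std::shared_mutex> lock(f.mmap_sem);  // down_write() in the patch
    // Truncate the page cache for the range, then free the blocks; no fault
    // can reinstantiate a page in this range while the exclusive lock is held.
}

void handle_page_fault(FileState& f) {
    std::shared_lock<std::shared_mutex> lock(f.mmap_sem);  // down_read() in the fault path
    // Safe to look up and map blocks: they cannot be freed underneath us.
}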
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: long Tracks::ParseTrackEntry(long long track_start, long long track_size, long long element_start, long long element_size, Track*& pResult) const { if (pResult) return -1; IMkvReader* const pReader = m_pSegment->m_pReader; long long pos = track_start; const long long track_stop = track_start + track_size; Track::Info info; info.type = 0; info.number = 0; info.uid = 0; info.defaultDuration = 0; Track::Settings v; v.start = -1; v.size = -1; Track::Settings a; a.start = -1; a.size = -1; Track::Settings e; // content_encodings_settings; e.start = -1; e.size = -1; long long lacing = 1; // default is true while (pos < track_stop) { long long id, size; const long status = ParseElementHeader(pReader, pos, track_stop, id, size); if (status < 0) // error return status; if (size < 0) return E_FILE_FORMAT_INVALID; const long long start = pos; if (id == 0x60) { // VideoSettings ID v.start = start; v.size = size; } else if (id == 0x61) { // AudioSettings ID a.start = start; a.size = size; } else if (id == 0x2D80) { // ContentEncodings ID e.start = start; e.size = size; } else if (id == 0x33C5) { // Track UID if (size > 8) return E_FILE_FORMAT_INVALID; info.uid = 0; long long pos_ = start; const long long pos_end = start + size; while (pos_ != pos_end) { unsigned char b; const int status = pReader->Read(pos_, 1, &b); if (status) return status; info.uid <<= 8; info.uid |= b; ++pos_; } } else if (id == 0x57) { // Track Number const long long num = UnserializeUInt(pReader, pos, size); if ((num <= 0) || (num > 127)) return E_FILE_FORMAT_INVALID; info.number = static_cast<long>(num); } else if (id == 0x03) { // Track Type const long long type = UnserializeUInt(pReader, pos, size); if ((type <= 0) || (type > 254)) return E_FILE_FORMAT_INVALID; info.type = static_cast<long>(type); } else if (id == 0x136E) { // Track Name const long status = UnserializeString(pReader, pos, size, info.nameAsUTF8); if (status) return status; } else if (id == 0x02B59C) { // Track Language const long status = UnserializeString(pReader, pos, size, info.language); if (status) return status; } else if (id == 0x03E383) { // Default Duration const long long duration = UnserializeUInt(pReader, pos, size); if (duration < 0) return E_FILE_FORMAT_INVALID; info.defaultDuration = static_cast<unsigned long long>(duration); } else if (id == 0x06) { // CodecID const long status = UnserializeString(pReader, pos, size, info.codecId); if (status) return status; } else if (id == 0x1C) { // lacing lacing = UnserializeUInt(pReader, pos, size); if ((lacing < 0) || (lacing > 1)) return E_FILE_FORMAT_INVALID; } else if (id == 0x23A2) { // Codec Private delete[] info.codecPrivate; info.codecPrivate = NULL; info.codecPrivateSize = 0; const size_t buflen = static_cast<size_t>(size); if (buflen) { typedef unsigned char* buf_t; const buf_t buf = new (std::nothrow) unsigned char[buflen]; if (buf == NULL) return -1; const int status = pReader->Read(pos, static_cast<long>(buflen), buf); if (status) { delete[] buf; return status; } info.codecPrivate = buf; info.codecPrivateSize = buflen; } } else if (id == 0x058688) { // Codec Name const long status = UnserializeString(pReader, pos, size, info.codecNameAsUTF8); if (status) return status; } else if (id == 0x16AA) { // Codec Delay info.codecDelay = UnserializeUInt(pReader, pos, size); } else if (id == 0x16BB) { // Seek Pre Roll info.seekPreRoll = UnserializeUInt(pReader, pos, size); } pos += size; // consume payload assert(pos <= track_stop); } assert(pos == track_stop); if (info.number <= 0) // not specified 
return E_FILE_FORMAT_INVALID; if (GetTrackByNumber(info.number)) return E_FILE_FORMAT_INVALID; if (info.type <= 0) // not specified return E_FILE_FORMAT_INVALID; info.lacing = (lacing > 0) ? true : false; if (info.type == Track::kVideo) { if (v.start < 0) return E_FILE_FORMAT_INVALID; if (a.start >= 0) return E_FILE_FORMAT_INVALID; info.settings = v; VideoTrack* pTrack = NULL; const long status = VideoTrack::Parse(m_pSegment, info, element_start, element_size, pTrack); if (status) return status; pResult = pTrack; assert(pResult); if (e.start >= 0) pResult->ParseContentEncodingsEntry(e.start, e.size); } else if (info.type == Track::kAudio) { if (a.start < 0) return E_FILE_FORMAT_INVALID; if (v.start >= 0) return E_FILE_FORMAT_INVALID; info.settings = a; AudioTrack* pTrack = NULL; const long status = AudioTrack::Parse(m_pSegment, info, element_start, element_size, pTrack); if (status) return status; pResult = pTrack; assert(pResult); if (e.start >= 0) pResult->ParseContentEncodingsEntry(e.start, e.size); } else { if (a.start >= 0) return E_FILE_FORMAT_INVALID; if (v.start >= 0) return E_FILE_FORMAT_INVALID; if (info.type == Track::kMetadata && e.start >= 0) return E_FILE_FORMAT_INVALID; info.settings.start = -1; info.settings.size = 0; Track* pTrack = NULL; const long status = Track::Create(m_pSegment, info, element_start, element_size, pTrack); if (status) return status; pResult = pTrack; assert(pResult); } return 0; // success } Commit Message: external/libvpx/libwebm: Update snapshot Update libwebm snapshot. This update contains security fixes from upstream. Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b BUG=23167726 Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207 (cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a) CWE ID: CWE-20
long Tracks::ParseTrackEntry(long long track_start, long long track_size, long long element_start, long long element_size, Track*& pResult) const { if (pResult) return -1; IMkvReader* const pReader = m_pSegment->m_pReader; long long pos = track_start; const long long track_stop = track_start + track_size; Track::Info info; info.type = 0; info.number = 0; info.uid = 0; info.defaultDuration = 0; Track::Settings v; v.start = -1; v.size = -1; Track::Settings a; a.start = -1; a.size = -1; Track::Settings e; // content_encodings_settings; e.start = -1; e.size = -1; long long lacing = 1; // default is true while (pos < track_stop) { long long id, size; const long status = ParseElementHeader(pReader, pos, track_stop, id, size); if (status < 0) // error return status; if (size < 0) return E_FILE_FORMAT_INVALID; const long long start = pos; if (id == 0x60) { // VideoSettings ID v.start = start; v.size = size; } else if (id == 0x61) { // AudioSettings ID a.start = start; a.size = size; } else if (id == 0x2D80) { // ContentEncodings ID e.start = start; e.size = size; } else if (id == 0x33C5) { // Track UID if (size > 8) return E_FILE_FORMAT_INVALID; info.uid = 0; long long pos_ = start; const long long pos_end = start + size; while (pos_ != pos_end) { unsigned char b; const int status = pReader->Read(pos_, 1, &b); if (status) return status; info.uid <<= 8; info.uid |= b; ++pos_; } } else if (id == 0x57) { // Track Number const long long num = UnserializeUInt(pReader, pos, size); if ((num <= 0) || (num > 127)) return E_FILE_FORMAT_INVALID; info.number = static_cast<long>(num); } else if (id == 0x03) { // Track Type const long long type = UnserializeUInt(pReader, pos, size); if ((type <= 0) || (type > 254)) return E_FILE_FORMAT_INVALID; info.type = static_cast<long>(type); } else if (id == 0x136E) { // Track Name const long status = UnserializeString(pReader, pos, size, info.nameAsUTF8); if (status) return status; } else if (id == 0x02B59C) { // Track Language const long status = UnserializeString(pReader, pos, size, info.language); if (status) return status; } else if (id == 0x03E383) { // Default Duration const long long duration = UnserializeUInt(pReader, pos, size); if (duration < 0) return E_FILE_FORMAT_INVALID; info.defaultDuration = static_cast<unsigned long long>(duration); } else if (id == 0x06) { // CodecID const long status = UnserializeString(pReader, pos, size, info.codecId); if (status) return status; } else if (id == 0x1C) { // lacing lacing = UnserializeUInt(pReader, pos, size); if ((lacing < 0) || (lacing > 1)) return E_FILE_FORMAT_INVALID; } else if (id == 0x23A2) { // Codec Private delete[] info.codecPrivate; info.codecPrivate = NULL; info.codecPrivateSize = 0; const size_t buflen = static_cast<size_t>(size); if (buflen) { unsigned char* buf = SafeArrayAlloc<unsigned char>(1, buflen); if (buf == NULL) return -1; const int status = pReader->Read(pos, static_cast<long>(buflen), buf); if (status) { delete[] buf; return status; } info.codecPrivate = buf; info.codecPrivateSize = buflen; } } else if (id == 0x058688) { // Codec Name const long status = UnserializeString(pReader, pos, size, info.codecNameAsUTF8); if (status) return status; } else if (id == 0x16AA) { // Codec Delay info.codecDelay = UnserializeUInt(pReader, pos, size); } else if (id == 0x16BB) { // Seek Pre Roll info.seekPreRoll = UnserializeUInt(pReader, pos, size); } pos += size; // consume payload if (pos > track_stop) return E_FILE_FORMAT_INVALID; } if (pos != track_stop) return E_FILE_FORMAT_INVALID; if (info.number <= 0) 
// not specified return E_FILE_FORMAT_INVALID; if (GetTrackByNumber(info.number)) return E_FILE_FORMAT_INVALID; if (info.type <= 0) // not specified return E_FILE_FORMAT_INVALID; info.lacing = (lacing > 0) ? true : false; if (info.type == Track::kVideo) { if (v.start < 0) return E_FILE_FORMAT_INVALID; if (a.start >= 0) return E_FILE_FORMAT_INVALID; info.settings = v; VideoTrack* pTrack = NULL; const long status = VideoTrack::Parse(m_pSegment, info, element_start, element_size, pTrack); if (status) return status; pResult = pTrack; assert(pResult); if (e.start >= 0) pResult->ParseContentEncodingsEntry(e.start, e.size); } else if (info.type == Track::kAudio) { if (a.start < 0) return E_FILE_FORMAT_INVALID; if (v.start >= 0) return E_FILE_FORMAT_INVALID; info.settings = a; AudioTrack* pTrack = NULL; const long status = AudioTrack::Parse(m_pSegment, info, element_start, element_size, pTrack); if (status) return status; pResult = pTrack; assert(pResult); if (e.start >= 0) pResult->ParseContentEncodingsEntry(e.start, e.size); } else { if (a.start >= 0) return E_FILE_FORMAT_INVALID; if (v.start >= 0) return E_FILE_FORMAT_INVALID; if (info.type == Track::kMetadata && e.start >= 0) return E_FILE_FORMAT_INVALID; info.settings.start = -1; info.settings.size = 0; Track* pTrack = NULL; const long status = Track::Create(m_pSegment, info, element_start, element_size, pTrack); if (status) return status; pResult = pTrack; assert(pResult); } return 0; // success }
173,859
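The libwebm record above replaces the bare new (std::nothrow) for CodecPrivate with SafeArrayAlloc, which refuses sizes that overflow or are implausibly large before allocating. A hedged sketch of such a helper follows; it mirrors the two-argument call site in the fixed code but is not the verbatim upstream implementation, and the 2 GiB cap is an assumption:

#include <cstddef>
#include <new>

// Allocate num_elements * element_size bytes (the record calls this with a
// one-byte element type), returning nullptr if the product would overflow or
// exceed a sanity cap instead of trusting a size taken from the file.
template <typename T>
T* SafeArrayAlloc(unsigned long long num_elements,
                  unsigned long long element_size) {
  if (num_elements == 0 || element_size == 0)
    return nullptr;
  const unsigned long long kMaxAllocBytes = 0x80000000ull;  // 2 GiB (assumed cap)
  if (num_elements > kMaxAllocBytes / element_size)
    return nullptr;                                          // overflow / too big
  const unsigned long long bytes = num_elements * element_size;
  return new (std::nothrow) T[static_cast<size_t>(bytes)];
}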
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ExtensionTtsPlatformImpl::set_error(const std::string& error) { error_ = error; } Commit Message: Extend TTS extension API to support richer events returned from the engine to the client. Previously we just had a completed event; this adds start, word boundary, sentence boundary, and marker boundary. In addition, interrupted and canceled, which were previously errors, now become events. Mac and Windows implementations extended to support as many of these events as possible. BUG=67713 BUG=70198 BUG=75106 BUG=83404 TEST=Updates all TTS API tests to be event-based, and adds new tests. Review URL: http://codereview.chromium.org/6792014 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@91665 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-20
void ExtensionTtsPlatformImpl::set_error(const std::string& error) {
170,395
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: queryin(char *buf) { QPRS_STATE state; int32 i; ltxtquery *query; int32 commonlen; ITEM *ptr; NODE *tmp; int32 pos = 0; #ifdef BS_DEBUG char pbuf[16384], *cur; #endif /* init state */ state.buf = buf; state.state = WAITOPERAND; state.count = 0; state.num = 0; state.str = NULL; /* init list of operand */ state.sumlen = 0; state.lenop = 64; state.curop = state.op = (char *) palloc(state.lenop); *(state.curop) = '\0'; /* parse query & make polish notation (postfix, but in reverse order) */ makepol(&state); if (!state.num) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("syntax error"), errdetail("Empty query."))); /* make finish struct */ commonlen = COMPUTESIZE(state.num, state.sumlen); query = (ltxtquery *) palloc(commonlen); SET_VARSIZE(query, commonlen); query->size = state.num; ptr = GETQUERY(query); /* set item in polish notation */ for (i = 0; i < state.num; i++) { ptr[i].type = state.str->type; ptr[i].val = state.str->val; ptr[i].distance = state.str->distance; ptr[i].length = state.str->length; ptr[i].flag = state.str->flag; tmp = state.str->next; pfree(state.str); state.str = tmp; } /* set user friendly-operand view */ memcpy((void *) GETOPERAND(query), (void *) state.op, state.sumlen); pfree(state.op); /* set left operand's position for every operator */ pos = 0; findoprnd(ptr, &pos); return query; } Commit Message: Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064 CWE ID: CWE-189
queryin(char *buf) { QPRS_STATE state; int32 i; ltxtquery *query; int32 commonlen; ITEM *ptr; NODE *tmp; int32 pos = 0; #ifdef BS_DEBUG char pbuf[16384], *cur; #endif /* init state */ state.buf = buf; state.state = WAITOPERAND; state.count = 0; state.num = 0; state.str = NULL; /* init list of operand */ state.sumlen = 0; state.lenop = 64; state.curop = state.op = (char *) palloc(state.lenop); *(state.curop) = '\0'; /* parse query & make polish notation (postfix, but in reverse order) */ makepol(&state); if (!state.num) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("syntax error"), errdetail("Empty query."))); if (LTXTQUERY_TOO_BIG(state.num, state.sumlen)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("ltxtquery is too large"))); commonlen = COMPUTESIZE(state.num, state.sumlen); query = (ltxtquery *) palloc(commonlen); SET_VARSIZE(query, commonlen); query->size = state.num; ptr = GETQUERY(query); /* set item in polish notation */ for (i = 0; i < state.num; i++) { ptr[i].type = state.str->type; ptr[i].val = state.str->val; ptr[i].distance = state.str->distance; ptr[i].length = state.str->length; ptr[i].flag = state.str->flag; tmp = state.str->next; pfree(state.str); state.str = tmp; } /* set user friendly-operand view */ memcpy((void *) GETOPERAND(query), (void *) state.op, state.sumlen); pfree(state.op); /* set left operand's position for every operator */ pos = 0; findoprnd(ptr, &pos); return query; }
166,408
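The PostgreSQL record above adds an LTXTQUERY_TOO_BIG check so the size passed to palloc is proven not to wrap before COMPUTESIZE runs. The same predict-the-overflow idea as a small standalone C++ helper; the function and parameter names are invented for illustration:

#include <cstddef>
#include <cstdint>
#include <optional>

// Total size of a query buffer: fixed header, num items, sumlen operand bytes.
// Returns nothing if any intermediate step would overflow size_t, so the
// caller can report "query too large" instead of allocating a short buffer.
std::optional<size_t> checked_query_size(uint32_t num, uint32_t sumlen,
                                         size_t header_size, size_t item_size) {
    size_t total = header_size;
    if (item_size != 0 && num > (SIZE_MAX - total) / item_size)
        return std::nullopt;                       // num * item_size overflows
    total += static_cast<size_t>(num) * item_size;
    if (sumlen > SIZE_MAX - total)
        return std::nullopt;                       // adding operand bytes wraps
    return total + sumlen;
}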
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void spl_filesystem_dir_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; *data = &iterator->current; } Commit Message: Fix bug #72262 - do not overflow int CWE ID: CWE-190
static void spl_filesystem_dir_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; *data = &iterator->current; }
167,070
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; } Commit Message: net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, We are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearning the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-264
static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
165,739
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ChromeRenderProcessObserver::OnSetTcmallocHeapProfiling( bool profiling, const std::string& filename_prefix) { #if !defined(OS_WIN) if (profiling) HeapProfilerStart(filename_prefix.c_str()); else HeapProfilerStop(); #endif } Commit Message: Disable tcmalloc profile files. BUG=154983 [email protected] NOTRY=true Review URL: https://chromiumcodereview.appspot.com/11087041 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@161048 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
void ChromeRenderProcessObserver::OnSetTcmallocHeapProfiling(
170,666
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: send_results(struct iperf_test *test) { int r = 0; cJSON *j; cJSON *j_streams; struct iperf_stream *sp; cJSON *j_stream; int sender_has_retransmits; iperf_size_t bytes_transferred; int retransmits; j = cJSON_CreateObject(); if (j == NULL) { i_errno = IEPACKAGERESULTS; r = -1; } else { cJSON_AddFloatToObject(j, "cpu_util_total", test->cpu_util[0]); cJSON_AddFloatToObject(j, "cpu_util_user", test->cpu_util[1]); cJSON_AddFloatToObject(j, "cpu_util_system", test->cpu_util[2]); if ( ! test->sender ) sender_has_retransmits = -1; else sender_has_retransmits = test->sender_has_retransmits; cJSON_AddIntToObject(j, "sender_has_retransmits", sender_has_retransmits); /* If on the server and sending server output, then do this */ if (test->role == 's' && test->get_server_output) { if (test->json_output) { /* Add JSON output */ cJSON_AddItemReferenceToObject(j, "server_output_json", test->json_top); } else { /* Add textual output */ size_t buflen = 0; /* Figure out how much room we need to hold the complete output string */ struct iperf_textline *t; TAILQ_FOREACH(t, &(test->server_output_list), textlineentries) { buflen += strlen(t->line); } /* Allocate and build it up from the component lines */ char *output = calloc(buflen + 1, 1); TAILQ_FOREACH(t, &(test->server_output_list), textlineentries) { strncat(output, t->line, buflen); buflen -= strlen(t->line); } cJSON_AddStringToObject(j, "server_output_text", output); } } j_streams = cJSON_CreateArray(); if (j_streams == NULL) { i_errno = IEPACKAGERESULTS; r = -1; } else { cJSON_AddItemToObject(j, "streams", j_streams); SLIST_FOREACH(sp, &test->streams, streams) { j_stream = cJSON_CreateObject(); if (j_stream == NULL) { i_errno = IEPACKAGERESULTS; r = -1; } else { cJSON_AddItemToArray(j_streams, j_stream); bytes_transferred = test->sender ? sp->result->bytes_sent : sp->result->bytes_received; retransmits = (test->sender && test->sender_has_retransmits) ? sp->result->stream_retrans : -1; cJSON_AddIntToObject(j_stream, "id", sp->id); cJSON_AddIntToObject(j_stream, "bytes", bytes_transferred); cJSON_AddIntToObject(j_stream, "retransmits", retransmits); cJSON_AddFloatToObject(j_stream, "jitter", sp->jitter); cJSON_AddIntToObject(j_stream, "errors", sp->cnt_error); cJSON_AddIntToObject(j_stream, "packets", sp->packet_count); } } if (r == 0 && test->debug) { printf("send_results\n%s\n", cJSON_Print(j)); } if (r == 0 && JSON_write(test->ctrl_sck, j) < 0) { i_errno = IESENDRESULTS; r = -1; } } cJSON_Delete(j); } return r; } Commit Message: Fix a buffer overflow / heap corruption issue that could occur if a malformed JSON string was passed on the control channel. This issue, present in the cJSON library, was already fixed upstream, so was addressed here in iperf3 by importing a newer version of cJSON (plus local ESnet modifications). Discovered and reported by Dave McDaniel, Cisco Talos. Based on a patch by @dopheide-esnet, with input from @DaveGamble. Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001, CVE-2016-4303 (cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40) Signed-off-by: Bruce A. Mah <[email protected]> CWE ID: CWE-119
send_results(struct iperf_test *test) { int r = 0; cJSON *j; cJSON *j_streams; struct iperf_stream *sp; cJSON *j_stream; int sender_has_retransmits; iperf_size_t bytes_transferred; int retransmits; j = cJSON_CreateObject(); if (j == NULL) { i_errno = IEPACKAGERESULTS; r = -1; } else { cJSON_AddNumberToObject(j, "cpu_util_total", test->cpu_util[0]); cJSON_AddNumberToObject(j, "cpu_util_user", test->cpu_util[1]); cJSON_AddNumberToObject(j, "cpu_util_system", test->cpu_util[2]); if ( ! test->sender ) sender_has_retransmits = -1; else sender_has_retransmits = test->sender_has_retransmits; cJSON_AddNumberToObject(j, "sender_has_retransmits", sender_has_retransmits); /* If on the server and sending server output, then do this */ if (test->role == 's' && test->get_server_output) { if (test->json_output) { /* Add JSON output */ cJSON_AddItemReferenceToObject(j, "server_output_json", test->json_top); } else { /* Add textual output */ size_t buflen = 0; /* Figure out how much room we need to hold the complete output string */ struct iperf_textline *t; TAILQ_FOREACH(t, &(test->server_output_list), textlineentries) { buflen += strlen(t->line); } /* Allocate and build it up from the component lines */ char *output = calloc(buflen + 1, 1); TAILQ_FOREACH(t, &(test->server_output_list), textlineentries) { strncat(output, t->line, buflen); buflen -= strlen(t->line); } cJSON_AddStringToObject(j, "server_output_text", output); } } j_streams = cJSON_CreateArray(); if (j_streams == NULL) { i_errno = IEPACKAGERESULTS; r = -1; } else { cJSON_AddItemToObject(j, "streams", j_streams); SLIST_FOREACH(sp, &test->streams, streams) { j_stream = cJSON_CreateObject(); if (j_stream == NULL) { i_errno = IEPACKAGERESULTS; r = -1; } else { cJSON_AddItemToArray(j_streams, j_stream); bytes_transferred = test->sender ? sp->result->bytes_sent : sp->result->bytes_received; retransmits = (test->sender && test->sender_has_retransmits) ? sp->result->stream_retrans : -1; cJSON_AddNumberToObject(j_stream, "id", sp->id); cJSON_AddNumberToObject(j_stream, "bytes", bytes_transferred); cJSON_AddNumberToObject(j_stream, "retransmits", retransmits); cJSON_AddNumberToObject(j_stream, "jitter", sp->jitter); cJSON_AddNumberToObject(j_stream, "errors", sp->cnt_error); cJSON_AddNumberToObject(j_stream, "packets", sp->packet_count); } } if (r == 0 && test->debug) { printf("send_results\n%s\n", cJSON_Print(j)); } if (r == 0 && JSON_write(test->ctrl_sck, j) < 0) { i_errno = IESENDRESULTS; r = -1; } } cJSON_Delete(j); } return r; }
167,317
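The iperf3 record above re-imports upstream cJSON and therefore drops the local cJSON_AddFloatToObject / cJSON_AddIntToObject wrappers in favor of the stock cJSON_AddNumberToObject. A small usage sketch of that stock call; the field names are invented and error handling is minimal:

#include <cstdio>
#include <cstdlib>
#include "cJSON.h"

int main() {
    cJSON *results = cJSON_CreateObject();
    if (results == nullptr)
        return 1;

    // One setter covers both integer and floating-point values, which is why
    // the fixed code can use it for bytes, retransmits and jitter alike.
    cJSON_AddNumberToObject(results, "bytes", 1234567.0);
    cJSON_AddNumberToObject(results, "jitter", 0.042);

    char *text = cJSON_Print(results);   // heap string owned by the caller
    if (text != nullptr) {
        puts(text);
        free(text);
    }
    cJSON_Delete(results);
    return 0;
}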
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int ParseCaffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { uint32_t chan_chunk = 0, channel_layout = 0, bcount; unsigned char *channel_identities = NULL; unsigned char *channel_reorder = NULL; int64_t total_samples = 0, infilesize; CAFFileHeader caf_file_header; CAFChunkHeader caf_chunk_header; CAFAudioFormat caf_audio_format; int i; infilesize = DoGetFileSize (infile); memcpy (&caf_file_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &caf_file_header) + 4, sizeof (CAFFileHeader) - 4, &bcount) || bcount != sizeof (CAFFileHeader) - 4)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_file_header, sizeof (CAFFileHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_file_header, CAFFileHeaderFormat); if (caf_file_header.mFileVersion != 1) { error_line ("%s: can't handle version %d .CAF files!", infilename, caf_file_header.mFileVersion); return WAVPACK_SOFT_ERROR; } while (1) { if (!DoReadFile (infile, &caf_chunk_header, sizeof (CAFChunkHeader), &bcount) || bcount != sizeof (CAFChunkHeader)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_chunk_header, sizeof (CAFChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_chunk_header, CAFChunkHeaderFormat); if (!strncmp (caf_chunk_header.mChunkType, "desc", 4)) { int supported = TRUE; if (caf_chunk_header.mChunkSize != sizeof (CAFAudioFormat) || !DoReadFile (infile, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize, &bcount) || bcount != caf_chunk_header.mChunkSize) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_audio_format, CAFAudioFormatFormat); if (debug_logging_mode) { char formatstr [5]; memcpy (formatstr, caf_audio_format.mFormatID, 4); formatstr [4] = 0; error_line ("format = %s, flags = %x, sampling rate = %g", formatstr, caf_audio_format.mFormatFlags, caf_audio_format.mSampleRate); error_line ("packet = %d bytes and %d frames", caf_audio_format.mBytesPerPacket, caf_audio_format.mFramesPerPacket); error_line ("channels per frame = %d, bits per channel = %d", caf_audio_format.mChannelsPerFrame, caf_audio_format.mBitsPerChannel); } if (strncmp (caf_audio_format.mFormatID, "lpcm", 4) || (caf_audio_format.mFormatFlags & ~3)) supported = FALSE; else if (caf_audio_format.mSampleRate < 1.0 || caf_audio_format.mSampleRate > 16777215.0 || caf_audio_format.mSampleRate != floor (caf_audio_format.mSampleRate)) supported = FALSE; else if (!caf_audio_format.mChannelsPerFrame || caf_audio_format.mChannelsPerFrame > 256) supported = FALSE; else if (caf_audio_format.mBitsPerChannel < 1 || caf_audio_format.mBitsPerChannel > 32 || ((caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) && caf_audio_format.mBitsPerChannel != 32)) supported = FALSE; else if (caf_audio_format.mFramesPerPacket != 1 || caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame < (caf_audio_format.mBitsPerChannel + 7) / 8 || 
caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame > 4 || caf_audio_format.mBytesPerPacket % caf_audio_format.mChannelsPerFrame) supported = FALSE; if (!supported) { error_line ("%s is an unsupported .CAF format!", infilename); return WAVPACK_SOFT_ERROR; } config->bytes_per_sample = caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame; config->float_norm_exp = (caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) ? 127 : 0; config->bits_per_sample = caf_audio_format.mBitsPerChannel; config->num_channels = caf_audio_format.mChannelsPerFrame; config->sample_rate = (int) caf_audio_format.mSampleRate; if (!(caf_audio_format.mFormatFlags & CAF_FORMAT_LITTLE_ENDIAN) && config->bytes_per_sample > 1) config->qmode |= QMODE_BIG_ENDIAN; if (config->bytes_per_sample == 1) config->qmode |= QMODE_SIGNED_BYTES; if (debug_logging_mode) { if (config->float_norm_exp == 127) error_line ("data format: 32-bit %s-endian floating point", (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little"); else error_line ("data format: %d-bit %s-endian integers stored in %d byte(s)", config->bits_per_sample, (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little", config->bytes_per_sample); } } else if (!strncmp (caf_chunk_header.mChunkType, "chan", 4)) { CAFChannelLayout *caf_channel_layout; if (caf_chunk_header.mChunkSize < 0 || caf_chunk_header.mChunkSize > 1024 || caf_chunk_header.mChunkSize < sizeof (CAFChannelLayout)) { error_line ("this .CAF file has an invalid 'chan' chunk!"); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("'chan' chunk is %d bytes", (int) caf_chunk_header.mChunkSize); caf_channel_layout = malloc ((size_t) caf_chunk_header.mChunkSize); if (!DoReadFile (infile, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize, &bcount) || bcount != caf_chunk_header.mChunkSize) { error_line ("%s is not a valid .CAF file!", infilename); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (caf_channel_layout, CAFChannelLayoutFormat); chan_chunk = 1; if (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED)) { error_line ("this CAF file already has channel order information!"); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } switch (caf_channel_layout->mChannelLayoutTag) { case kCAFChannelLayoutTag_UseChannelDescriptions: { CAFChannelDescription *descriptions = (CAFChannelDescription *) (caf_channel_layout + 1); int num_descriptions = caf_channel_layout->mNumberChannelDescriptions; int label, cindex = 0, idents = 0; if (caf_chunk_header.mChunkSize != sizeof (CAFChannelLayout) + sizeof (CAFChannelDescription) * num_descriptions || num_descriptions != config->num_channels) { error_line ("channel descriptions in 'chan' chunk are the wrong size!"); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } if (num_descriptions >= 256) { error_line ("%d channel descriptions is more than we can handle...ignoring!"); break; } channel_reorder = malloc (num_descriptions); memset (channel_reorder, -1, num_descriptions); channel_identities = malloc (num_descriptions+1); for (i = 0; i < num_descriptions; ++i) { WavpackBigEndianToNative (descriptions + i, CAFChannelDescriptionFormat); if (debug_logging_mode) error_line ("chan %d --> %d", i + 1, descriptions [i].mChannelLabel); } for (label 
= 1; label <= 18; ++label) for (i = 0; i < num_descriptions; ++i) if (descriptions [i].mChannelLabel == label) { config->channel_mask |= 1 << (label - 1); channel_reorder [i] = cindex++; break; } for (i = 0; i < num_descriptions; ++i) if (channel_reorder [i] == (unsigned char) -1) { uint32_t clabel = descriptions [i].mChannelLabel; if (clabel == 0 || clabel == 0xffffffff || clabel == 100) channel_identities [idents++] = 0xff; else if ((clabel >= 33 && clabel <= 44) || (clabel >= 200 && clabel <= 207) || (clabel >= 301 && clabel <= 305)) channel_identities [idents++] = clabel >= 301 ? clabel - 80 : clabel; else { error_line ("warning: unknown channel descriptions label: %d", clabel); channel_identities [idents++] = 0xff; } channel_reorder [i] = cindex++; } for (i = 0; i < num_descriptions; ++i) if (channel_reorder [i] != i) break; if (i == num_descriptions) { free (channel_reorder); // no reordering required, so don't channel_reorder = NULL; } else { config->qmode |= QMODE_REORDERED_CHANS; // reordering required, put channel count into layout channel_layout = num_descriptions; } if (!idents) { // if no non-MS channels, free the identities string free (channel_identities); channel_identities = NULL; } else channel_identities [idents] = 0; // otherwise NULL terminate it if (debug_logging_mode) { error_line ("layout_tag = 0x%08x, so generated bitmap of 0x%08x from %d descriptions, %d non-MS", caf_channel_layout->mChannelLayoutTag, config->channel_mask, caf_channel_layout->mNumberChannelDescriptions, idents); if (channel_reorder && num_descriptions <= 8) { char reorder_string [] = "12345678"; for (i = 0; i < num_descriptions; ++i) reorder_string [i] = channel_reorder [i] + '1'; reorder_string [i] = 0; error_line ("reordering string = \"%s\"\n", reorder_string); } } } break; case kCAFChannelLayoutTag_UseChannelBitmap: config->channel_mask = caf_channel_layout->mChannelBitmap; if (debug_logging_mode) error_line ("layout_tag = 0x%08x, so using supplied bitmap of 0x%08x", caf_channel_layout->mChannelLayoutTag, caf_channel_layout->mChannelBitmap); break; default: for (i = 0; i < NUM_LAYOUTS; ++i) if (caf_channel_layout->mChannelLayoutTag == layouts [i].mChannelLayoutTag) { config->channel_mask = layouts [i].mChannelBitmap; channel_layout = layouts [i].mChannelLayoutTag; if (layouts [i].mChannelReorder) { channel_reorder = (unsigned char *) strdup (layouts [i].mChannelReorder); config->qmode |= QMODE_REORDERED_CHANS; } if (layouts [i].mChannelIdentities) channel_identities = (unsigned char *) strdup (layouts [i].mChannelIdentities); if (debug_logging_mode) error_line ("layout_tag 0x%08x found in table, bitmap = 0x%08x, reorder = %s, identities = %s", channel_layout, config->channel_mask, channel_reorder ? "yes" : "no", channel_identities ? 
"yes" : "no"); break; } if (i == NUM_LAYOUTS && debug_logging_mode) error_line ("layout_tag 0x%08x not found in table...all channels unassigned", caf_channel_layout->mChannelLayoutTag); break; } free (caf_channel_layout); } else if (!strncmp (caf_chunk_header.mChunkType, "data", 4)) { // on the data chunk, get size and exit loop uint32_t mEditCount; if (!DoReadFile (infile, &mEditCount, sizeof (mEditCount), &bcount) || bcount != sizeof (mEditCount)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &mEditCount, sizeof (mEditCount))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } if ((config->qmode & QMODE_IGNORE_LENGTH) || caf_chunk_header.mChunkSize == -1) { config->qmode |= QMODE_IGNORE_LENGTH; if (infilesize && DoGetFilePosition (infile) != -1) total_samples = (infilesize - DoGetFilePosition (infile)) / caf_audio_format.mBytesPerPacket; else total_samples = -1; } else { if (infilesize && infilesize - caf_chunk_header.mChunkSize > 16777216) { error_line (".CAF file %s has over 16 MB of extra CAFF data, probably is corrupt!", infilename); return WAVPACK_SOFT_ERROR; } if ((caf_chunk_header.mChunkSize - 4) % caf_audio_format.mBytesPerPacket) { error_line (".CAF file %s has an invalid data chunk size, probably is corrupt!", infilename); return WAVPACK_SOFT_ERROR; } total_samples = (caf_chunk_header.mChunkSize - 4) / caf_audio_format.mBytesPerPacket; if (!total_samples) { error_line ("this .CAF file has no audio samples, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (total_samples > MAX_WAVPACK_SAMPLES) { error_line ("%s has too many samples for WavPack!", infilename); return WAVPACK_SOFT_ERROR; } } break; } else { // just copy unknown chunks to output file uint32_t bytes_to_copy = (uint32_t) caf_chunk_header.mChunkSize; char *buff; if (caf_chunk_header.mChunkSize < 0 || caf_chunk_header.mChunkSize > 1048576) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", caf_chunk_header.mChunkType [0], caf_chunk_header.mChunkType [1], caf_chunk_header.mChunkType [2], caf_chunk_header.mChunkType [3], caf_chunk_header.mChunkSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (!chan_chunk && !config->channel_mask && config->num_channels <= 2 && !(config->qmode & QMODE_CHANS_UNASSIGNED)) config->channel_mask = 0x5 - config->num_channels; if (!WavpackSetConfiguration64 (wpc, config, total_samples, channel_identities)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } if (channel_identities) free (channel_identities); if (channel_layout || channel_reorder) { if (!WavpackSetChannelLayout (wpc, channel_layout, channel_reorder)) { error_line ("problem with setting channel layout (should not happen)"); return WAVPACK_SOFT_ERROR; } if (channel_reorder) free (channel_reorder); } return WAVPACK_NO_ERROR; } Commit Message: issue #66: make sure CAF files have a "desc" chunk CWE ID: CWE-665
int ParseCaffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { uint32_t chan_chunk = 0, desc_chunk = 0, channel_layout = 0, bcount; unsigned char *channel_identities = NULL; unsigned char *channel_reorder = NULL; int64_t total_samples = 0, infilesize; CAFFileHeader caf_file_header; CAFChunkHeader caf_chunk_header; CAFAudioFormat caf_audio_format; int i; infilesize = DoGetFileSize (infile); memcpy (&caf_file_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &caf_file_header) + 4, sizeof (CAFFileHeader) - 4, &bcount) || bcount != sizeof (CAFFileHeader) - 4)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_file_header, sizeof (CAFFileHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_file_header, CAFFileHeaderFormat); if (caf_file_header.mFileVersion != 1) { error_line ("%s: can't handle version %d .CAF files!", infilename, caf_file_header.mFileVersion); return WAVPACK_SOFT_ERROR; } while (1) { if (!DoReadFile (infile, &caf_chunk_header, sizeof (CAFChunkHeader), &bcount) || bcount != sizeof (CAFChunkHeader)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_chunk_header, sizeof (CAFChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_chunk_header, CAFChunkHeaderFormat); if (!strncmp (caf_chunk_header.mChunkType, "desc", 4)) { int supported = TRUE; if (caf_chunk_header.mChunkSize != sizeof (CAFAudioFormat) || !DoReadFile (infile, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize, &bcount) || bcount != caf_chunk_header.mChunkSize) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_audio_format, CAFAudioFormatFormat); desc_chunk = 1; if (debug_logging_mode) { char formatstr [5]; memcpy (formatstr, caf_audio_format.mFormatID, 4); formatstr [4] = 0; error_line ("format = %s, flags = %x, sampling rate = %g", formatstr, caf_audio_format.mFormatFlags, caf_audio_format.mSampleRate); error_line ("packet = %d bytes and %d frames", caf_audio_format.mBytesPerPacket, caf_audio_format.mFramesPerPacket); error_line ("channels per frame = %d, bits per channel = %d", caf_audio_format.mChannelsPerFrame, caf_audio_format.mBitsPerChannel); } if (strncmp (caf_audio_format.mFormatID, "lpcm", 4) || (caf_audio_format.mFormatFlags & ~3)) supported = FALSE; else if (caf_audio_format.mSampleRate < 1.0 || caf_audio_format.mSampleRate > 16777215.0 || caf_audio_format.mSampleRate != floor (caf_audio_format.mSampleRate)) supported = FALSE; else if (!caf_audio_format.mChannelsPerFrame || caf_audio_format.mChannelsPerFrame > 256) supported = FALSE; else if (caf_audio_format.mBitsPerChannel < 1 || caf_audio_format.mBitsPerChannel > 32 || ((caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) && caf_audio_format.mBitsPerChannel != 32)) supported = FALSE; else if (caf_audio_format.mFramesPerPacket != 1 || caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame < 
(caf_audio_format.mBitsPerChannel + 7) / 8 || caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame > 4 || caf_audio_format.mBytesPerPacket % caf_audio_format.mChannelsPerFrame) supported = FALSE; if (!supported) { error_line ("%s is an unsupported .CAF format!", infilename); return WAVPACK_SOFT_ERROR; } config->bytes_per_sample = caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame; config->float_norm_exp = (caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) ? 127 : 0; config->bits_per_sample = caf_audio_format.mBitsPerChannel; config->num_channels = caf_audio_format.mChannelsPerFrame; config->sample_rate = (int) caf_audio_format.mSampleRate; if (!(caf_audio_format.mFormatFlags & CAF_FORMAT_LITTLE_ENDIAN) && config->bytes_per_sample > 1) config->qmode |= QMODE_BIG_ENDIAN; if (config->bytes_per_sample == 1) config->qmode |= QMODE_SIGNED_BYTES; if (debug_logging_mode) { if (config->float_norm_exp == 127) error_line ("data format: 32-bit %s-endian floating point", (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little"); else error_line ("data format: %d-bit %s-endian integers stored in %d byte(s)", config->bits_per_sample, (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little", config->bytes_per_sample); } } else if (!strncmp (caf_chunk_header.mChunkType, "chan", 4)) { CAFChannelLayout *caf_channel_layout; if (caf_chunk_header.mChunkSize < 0 || caf_chunk_header.mChunkSize > 1024 || caf_chunk_header.mChunkSize < sizeof (CAFChannelLayout)) { error_line ("this .CAF file has an invalid 'chan' chunk!"); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("'chan' chunk is %d bytes", (int) caf_chunk_header.mChunkSize); caf_channel_layout = malloc ((size_t) caf_chunk_header.mChunkSize); if (!DoReadFile (infile, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize, &bcount) || bcount != caf_chunk_header.mChunkSize) { error_line ("%s is not a valid .CAF file!", infilename); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (caf_channel_layout, CAFChannelLayoutFormat); chan_chunk = 1; if (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED)) { error_line ("this CAF file already has channel order information!"); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } switch (caf_channel_layout->mChannelLayoutTag) { case kCAFChannelLayoutTag_UseChannelDescriptions: { CAFChannelDescription *descriptions = (CAFChannelDescription *) (caf_channel_layout + 1); int num_descriptions = caf_channel_layout->mNumberChannelDescriptions; int label, cindex = 0, idents = 0; if (caf_chunk_header.mChunkSize != sizeof (CAFChannelLayout) + sizeof (CAFChannelDescription) * num_descriptions || num_descriptions != config->num_channels) { error_line ("channel descriptions in 'chan' chunk are the wrong size!"); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } if (num_descriptions >= 256) { error_line ("%d channel descriptions is more than we can handle...ignoring!"); break; } channel_reorder = malloc (num_descriptions); memset (channel_reorder, -1, num_descriptions); channel_identities = malloc (num_descriptions+1); for (i = 0; i < num_descriptions; ++i) { WavpackBigEndianToNative (descriptions + i, CAFChannelDescriptionFormat); if (debug_logging_mode) error_line ("chan %d --> %d", i + 1, 
descriptions [i].mChannelLabel); } for (label = 1; label <= 18; ++label) for (i = 0; i < num_descriptions; ++i) if (descriptions [i].mChannelLabel == label) { config->channel_mask |= 1 << (label - 1); channel_reorder [i] = cindex++; break; } for (i = 0; i < num_descriptions; ++i) if (channel_reorder [i] == (unsigned char) -1) { uint32_t clabel = descriptions [i].mChannelLabel; if (clabel == 0 || clabel == 0xffffffff || clabel == 100) channel_identities [idents++] = 0xff; else if ((clabel >= 33 && clabel <= 44) || (clabel >= 200 && clabel <= 207) || (clabel >= 301 && clabel <= 305)) channel_identities [idents++] = clabel >= 301 ? clabel - 80 : clabel; else { error_line ("warning: unknown channel descriptions label: %d", clabel); channel_identities [idents++] = 0xff; } channel_reorder [i] = cindex++; } for (i = 0; i < num_descriptions; ++i) if (channel_reorder [i] != i) break; if (i == num_descriptions) { free (channel_reorder); // no reordering required, so don't channel_reorder = NULL; } else { config->qmode |= QMODE_REORDERED_CHANS; // reordering required, put channel count into layout channel_layout = num_descriptions; } if (!idents) { // if no non-MS channels, free the identities string free (channel_identities); channel_identities = NULL; } else channel_identities [idents] = 0; // otherwise NULL terminate it if (debug_logging_mode) { error_line ("layout_tag = 0x%08x, so generated bitmap of 0x%08x from %d descriptions, %d non-MS", caf_channel_layout->mChannelLayoutTag, config->channel_mask, caf_channel_layout->mNumberChannelDescriptions, idents); if (channel_reorder && num_descriptions <= 8) { char reorder_string [] = "12345678"; for (i = 0; i < num_descriptions; ++i) reorder_string [i] = channel_reorder [i] + '1'; reorder_string [i] = 0; error_line ("reordering string = \"%s\"\n", reorder_string); } } } break; case kCAFChannelLayoutTag_UseChannelBitmap: config->channel_mask = caf_channel_layout->mChannelBitmap; if (debug_logging_mode) error_line ("layout_tag = 0x%08x, so using supplied bitmap of 0x%08x", caf_channel_layout->mChannelLayoutTag, caf_channel_layout->mChannelBitmap); break; default: for (i = 0; i < NUM_LAYOUTS; ++i) if (caf_channel_layout->mChannelLayoutTag == layouts [i].mChannelLayoutTag) { config->channel_mask = layouts [i].mChannelBitmap; channel_layout = layouts [i].mChannelLayoutTag; if (layouts [i].mChannelReorder) { channel_reorder = (unsigned char *) strdup (layouts [i].mChannelReorder); config->qmode |= QMODE_REORDERED_CHANS; } if (layouts [i].mChannelIdentities) channel_identities = (unsigned char *) strdup (layouts [i].mChannelIdentities); if (debug_logging_mode) error_line ("layout_tag 0x%08x found in table, bitmap = 0x%08x, reorder = %s, identities = %s", channel_layout, config->channel_mask, channel_reorder ? "yes" : "no", channel_identities ? 
"yes" : "no"); break; } if (i == NUM_LAYOUTS && debug_logging_mode) error_line ("layout_tag 0x%08x not found in table...all channels unassigned", caf_channel_layout->mChannelLayoutTag); break; } free (caf_channel_layout); } else if (!strncmp (caf_chunk_header.mChunkType, "data", 4)) { // on the data chunk, get size and exit loop uint32_t mEditCount; if (!desc_chunk || !DoReadFile (infile, &mEditCount, sizeof (mEditCount), &bcount) || bcount != sizeof (mEditCount)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &mEditCount, sizeof (mEditCount))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } if ((config->qmode & QMODE_IGNORE_LENGTH) || caf_chunk_header.mChunkSize == -1) { config->qmode |= QMODE_IGNORE_LENGTH; if (infilesize && DoGetFilePosition (infile) != -1) total_samples = (infilesize - DoGetFilePosition (infile)) / caf_audio_format.mBytesPerPacket; else total_samples = -1; } else { if (infilesize && infilesize - caf_chunk_header.mChunkSize > 16777216) { error_line (".CAF file %s has over 16 MB of extra CAFF data, probably is corrupt!", infilename); return WAVPACK_SOFT_ERROR; } if ((caf_chunk_header.mChunkSize - 4) % caf_audio_format.mBytesPerPacket) { error_line (".CAF file %s has an invalid data chunk size, probably is corrupt!", infilename); return WAVPACK_SOFT_ERROR; } total_samples = (caf_chunk_header.mChunkSize - 4) / caf_audio_format.mBytesPerPacket; if (!total_samples) { error_line ("this .CAF file has no audio samples, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (total_samples > MAX_WAVPACK_SAMPLES) { error_line ("%s has too many samples for WavPack!", infilename); return WAVPACK_SOFT_ERROR; } } break; } else { // just copy unknown chunks to output file uint32_t bytes_to_copy = (uint32_t) caf_chunk_header.mChunkSize; char *buff; if (caf_chunk_header.mChunkSize < 0 || caf_chunk_header.mChunkSize > 1048576) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", caf_chunk_header.mChunkType [0], caf_chunk_header.mChunkType [1], caf_chunk_header.mChunkType [2], caf_chunk_header.mChunkType [3], caf_chunk_header.mChunkSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (!chan_chunk && !config->channel_mask && config->num_channels <= 2 && !(config->qmode & QMODE_CHANS_UNASSIGNED)) config->channel_mask = 0x5 - config->num_channels; if (!WavpackSetConfiguration64 (wpc, config, total_samples, channel_identities)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } if (channel_identities) free (channel_identities); if (channel_layout || channel_reorder) { if (!WavpackSetChannelLayout (wpc, channel_layout, channel_reorder)) { error_line ("problem with setting channel layout (should not happen)"); return WAVPACK_SOFT_ERROR; } if (channel_reorder) free (channel_reorder); } return WAVPACK_NO_ERROR; }
169,462
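The fix above (CWE-665, improper initialization) works by recording that a valid "desc" chunk was parsed before the "data" chunk is allowed to use the audio format fields derived from it; without that flag, mBytesPerPacket is still zero when the data-chunk size is divided by it. A minimal standalone sketch of the same "required chunk seen" guard follows; the names and the 8-byte stub reader are hypothetical, not WavPack code.

/* Sketch only: a parser that refuses a "data" chunk until a format chunk has
 * been seen.  chunk_t, read_chunk and parse are illustrative names, and the
 * fixed-size header read assumes an unpadded struct. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef struct { char type[4]; uint32_t size; } chunk_t;

static int read_chunk(FILE *f, chunk_t *hdr)           /* stub reader for the sketch */
{
    return fread(hdr, sizeof *hdr, 1, f) == 1 ? 0 : -1;
}

static long parse(FILE *f)
{
    int have_format = 0;            /* set only after a valid format ("desc") chunk */
    uint32_t bytes_per_packet = 0;  /* stays 0 if the format chunk never appears    */
    chunk_t hdr;

    while (read_chunk(f, &hdr) == 0) {
        if (memcmp(hdr.type, "desc", 4) == 0) {
            /* ...validate the format chunk here... */
            bytes_per_packet = 4;   /* placeholder value for the sketch */
            have_format = 1;
        } else if (memcmp(hdr.type, "data", 4) == 0) {
            if (!have_format)       /* the fix: reject data before the format is     */
                return -1;          /* known, so bytes_per_packet is never used as 0 */
            return (long)(hdr.size / bytes_per_packet);
        } else {
            fseek(f, (long)hdr.size, SEEK_CUR);   /* skip unknown chunks */
        }
    }
    return -1;
}

The essential point is that the rejection happens at the first use of the dependent field, so no arithmetic on the uninitialized value is reachable.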
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: std::string SanitizeRevision(const std::string& revision) { for (size_t i = 0; i < revision.length(); i++) { if (!(revision[i] == '@' && i == 0) && !(revision[i] >= '0' && revision[i] <= '9') && !(revision[i] >= 'a' && revision[i] <= 'z') && !(revision[i] >= 'A' && revision[i] <= 'Z')) { return std::string(); } } return revision; } Commit Message: DevTools: move front-end URL handling to DevToolsUIBindingds BUG=662859 Review-Url: https://codereview.chromium.org/2607833002 Cr-Commit-Position: refs/heads/master@{#440926} CWE ID: CWE-200
std::string SanitizeRevision(const std::string& revision) {
172,464
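The row above is a CWE-200 case in Chromium's DevTools, and the dataset's fixed function is truncated here, so only the vulnerable allowlist check is visible. For reference, that check can be restated as a small C helper (the original is C++; the helper name is illustrative): the whole revision string is rejected if it contains anything outside [0-9a-zA-Z], with '@' tolerated only in the first position.

/* C restatement of the allowlist in the function above; not the upstream fix. */
#include <stddef.h>

static int revision_is_clean(const char *rev)
{
    for (size_t i = 0; rev[i] != '\0'; i++) {
        char c = rev[i];
        if (c == '@' && i == 0)
            continue;
        if ((c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'))
            continue;
        return 0;    /* any other character invalidates the whole revision string */
    }
    return 1;
}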
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: dump_global_data(FILE *fp, data_t * data) { #ifdef _WITH_VRRP_ char buf[64]; #endif if (!data) return; conf_write(fp, "------< Global definitions >------"); #if HAVE_DECL_CLONE_NEWNET conf_write(fp, " Network namespace = %s", data->network_namespace ? data->network_namespace : "(default)"); #endif if (data->instance_name) conf_write(fp, " Instance name = %s", data->instance_name); if (data->router_id) conf_write(fp, " Router ID = %s", data->router_id); if (data->smtp_server.ss_family) { conf_write(fp, " Smtp server = %s", inet_sockaddrtos(&data->smtp_server)); conf_write(fp, " Smtp server port = %u", ntohs(inet_sockaddrport(&data->smtp_server))); } if (data->smtp_helo_name) conf_write(fp, " Smtp HELO name = %s" , data->smtp_helo_name); if (data->smtp_connection_to) conf_write(fp, " Smtp server connection timeout = %lu" , data->smtp_connection_to / TIMER_HZ); if (data->email_from) { conf_write(fp, " Email notification from = %s" , data->email_from); dump_list(fp, data->email); } conf_write(fp, " Default smtp_alert = %s", data->smtp_alert == -1 ? "unset" : data->smtp_alert ? "on" : "off"); #ifdef _WITH_VRRP_ conf_write(fp, " Default smtp_alert_vrrp = %s", data->smtp_alert_vrrp == -1 ? "unset" : data->smtp_alert_vrrp ? "on" : "off"); #endif #ifdef _WITH_LVS_ conf_write(fp, " Default smtp_alert_checker = %s", data->smtp_alert_checker == -1 ? "unset" : data->smtp_alert_checker ? "on" : "off"); #endif #ifdef _WITH_VRRP_ conf_write(fp, " Dynamic interfaces = %s", data->dynamic_interfaces ? "true" : "false"); if (data->dynamic_interfaces) conf_write(fp, " Allow interface changes = %s", data->allow_if_changes ? "true" : "false"); if (data->no_email_faults) conf_write(fp, " Send emails for fault transitions = off"); #endif #ifdef _WITH_LVS_ if (data->lvs_tcp_timeout) conf_write(fp, " LVS TCP timeout = %d", data->lvs_tcp_timeout); if (data->lvs_tcpfin_timeout) conf_write(fp, " LVS TCP FIN timeout = %d", data->lvs_tcpfin_timeout); if (data->lvs_udp_timeout) conf_write(fp, " LVS TCP timeout = %d", data->lvs_udp_timeout); #ifdef _WITH_VRRP_ #ifndef _DEBUG_ if (prog_type == PROG_TYPE_VRRP) #endif conf_write(fp, " Default interface = %s", data->default_ifp ? data->default_ifp->ifname : DFLT_INT); if (data->lvs_syncd.vrrp) { conf_write(fp, " LVS syncd vrrp instance = %s" , data->lvs_syncd.vrrp->iname); if (data->lvs_syncd.ifname) conf_write(fp, " LVS syncd interface = %s" , data->lvs_syncd.ifname); conf_write(fp, " LVS syncd syncid = %u" , data->lvs_syncd.syncid); #ifdef _HAVE_IPVS_SYNCD_ATTRIBUTES_ if (data->lvs_syncd.sync_maxlen) conf_write(fp, " LVS syncd maxlen = %u", data->lvs_syncd.sync_maxlen); if (data->lvs_syncd.mcast_group.ss_family != AF_UNSPEC) conf_write(fp, " LVS mcast group %s", inet_sockaddrtos(&data->lvs_syncd.mcast_group)); if (data->lvs_syncd.mcast_port) conf_write(fp, " LVS syncd mcast port = %d", data->lvs_syncd.mcast_port); if (data->lvs_syncd.mcast_ttl) conf_write(fp, " LVS syncd mcast ttl = %u", data->lvs_syncd.mcast_ttl); #endif } #endif conf_write(fp, " LVS flush = %s", data->lvs_flush ? 
"true" : "false"); #endif if (data->notify_fifo.name) { conf_write(fp, " Global notify fifo = %s", data->notify_fifo.name); if (data->notify_fifo.script) conf_write(fp, " Global notify fifo script = %s, uid:gid %d:%d", cmd_str(data->notify_fifo.script), data->notify_fifo.script->uid, data->notify_fifo.script->gid); } #ifdef _WITH_VRRP_ if (data->vrrp_notify_fifo.name) { conf_write(fp, " VRRP notify fifo = %s", data->vrrp_notify_fifo.name); if (data->vrrp_notify_fifo.script) conf_write(fp, " VRRP notify fifo script = %s, uid:gid %d:%d", cmd_str(data->vrrp_notify_fifo.script), data->vrrp_notify_fifo.script->uid, data->vrrp_notify_fifo.script->gid); } #endif #ifdef _WITH_LVS_ if (data->lvs_notify_fifo.name) { conf_write(fp, " LVS notify fifo = %s", data->lvs_notify_fifo.name); if (data->lvs_notify_fifo.script) conf_write(fp, " LVS notify fifo script = %s, uid:gid %d:%d", cmd_str(data->lvs_notify_fifo.script), data->lvs_notify_fifo.script->uid, data->lvs_notify_fifo.script->gid); } #endif #ifdef _WITH_VRRP_ if (data->vrrp_mcast_group4.sin_family) { conf_write(fp, " VRRP IPv4 mcast group = %s" , inet_sockaddrtos((struct sockaddr_storage *)&data->vrrp_mcast_group4)); } if (data->vrrp_mcast_group6.sin6_family) { conf_write(fp, " VRRP IPv6 mcast group = %s" , inet_sockaddrtos((struct sockaddr_storage *)&data->vrrp_mcast_group6)); } conf_write(fp, " Gratuitous ARP delay = %u", data->vrrp_garp_delay/TIMER_HZ); conf_write(fp, " Gratuitous ARP repeat = %u", data->vrrp_garp_rep); conf_write(fp, " Gratuitous ARP refresh timer = %lu", data->vrrp_garp_refresh.tv_sec); conf_write(fp, " Gratuitous ARP refresh repeat = %d", data->vrrp_garp_refresh_rep); conf_write(fp, " Gratuitous ARP lower priority delay = %d", data->vrrp_garp_lower_prio_delay == PARAMETER_UNSET ? PARAMETER_UNSET : data->vrrp_garp_lower_prio_delay / TIMER_HZ); conf_write(fp, " Gratuitous ARP lower priority repeat = %d", data->vrrp_garp_lower_prio_rep); conf_write(fp, " Send advert after receive lower priority advert = %s", data->vrrp_lower_prio_no_advert ? "false" : "true"); conf_write(fp, " Send advert after receive higher priority advert = %s", data->vrrp_higher_prio_send_advert ? "true" : "false"); conf_write(fp, " Gratuitous ARP interval = %d", data->vrrp_garp_interval); conf_write(fp, " Gratuitous NA interval = %d", data->vrrp_gna_interval); conf_write(fp, " VRRP default protocol version = %d", data->vrrp_version); if (data->vrrp_iptables_inchain[0]) conf_write(fp," Iptables input chain = %s", data->vrrp_iptables_inchain); if (data->vrrp_iptables_outchain[0]) conf_write(fp," Iptables output chain = %s", data->vrrp_iptables_outchain); #ifdef _HAVE_LIBIPSET_ conf_write(fp, " Using ipsets = %s", data->using_ipsets ? "true" : "false"); if (data->vrrp_ipset_address[0]) conf_write(fp," ipset IPv4 address set = %s", data->vrrp_ipset_address); if (data->vrrp_ipset_address6[0]) conf_write(fp," ipset IPv6 address set = %s", data->vrrp_ipset_address6); if (data->vrrp_ipset_address_iface6[0]) conf_write(fp," ipset IPv6 address,iface set = %s", data->vrrp_ipset_address_iface6); #endif conf_write(fp, " VRRP check unicast_src = %s", data->vrrp_check_unicast_src ? "true" : "false"); conf_write(fp, " VRRP skip check advert addresses = %s", data->vrrp_skip_check_adv_addr ? "true" : "false"); conf_write(fp, " VRRP strict mode = %s", data->vrrp_strict ? "true" : "false"); conf_write(fp, " VRRP process priority = %d", data->vrrp_process_priority); conf_write(fp, " VRRP don't swap = %s", data->vrrp_no_swap ? 
"true" : "false"); #ifdef _HAVE_SCHED_RT_ conf_write(fp, " VRRP realtime priority = %u", data->vrrp_realtime_priority); #if HAVE_DECL_RLIMIT_RTTIME conf_write(fp, " VRRP realtime limit = %lu", data->vrrp_rlimit_rt); #endif #endif #endif #ifdef _WITH_LVS_ conf_write(fp, " Checker process priority = %d", data->checker_process_priority); conf_write(fp, " Checker don't swap = %s", data->checker_no_swap ? "true" : "false"); #ifdef _HAVE_SCHED_RT_ conf_write(fp, " Checker realtime priority = %u", data->checker_realtime_priority); #if HAVE_DECL_RLIMIT_RTTIME conf_write(fp, " Checker realtime limit = %lu", data->checker_rlimit_rt); #endif #endif #endif #ifdef _WITH_BFD_ conf_write(fp, " BFD process priority = %d", data->bfd_process_priority); conf_write(fp, " BFD don't swap = %s", data->bfd_no_swap ? "true" : "false"); #ifdef _HAVE_SCHED_RT_ conf_write(fp, " BFD realtime priority = %u", data->bfd_realtime_priority); #if HAVE_DECL_RLIMIT_RTTIME conf_write(fp, " BFD realtime limit = %lu", data->bfd_rlimit_rt); #endif #endif #endif #ifdef _WITH_SNMP_VRRP_ conf_write(fp, " SNMP vrrp %s", data->enable_snmp_vrrp ? "enabled" : "disabled"); #endif #ifdef _WITH_SNMP_CHECKER_ conf_write(fp, " SNMP checker %s", data->enable_snmp_checker ? "enabled" : "disabled"); #endif #ifdef _WITH_SNMP_RFCV2_ conf_write(fp, " SNMP RFCv2 %s", data->enable_snmp_rfcv2 ? "enabled" : "disabled"); #endif #ifdef _WITH_SNMP_RFCV3_ conf_write(fp, " SNMP RFCv3 %s", data->enable_snmp_rfcv3 ? "enabled" : "disabled"); #endif #ifdef _WITH_SNMP_ conf_write(fp, " SNMP traps %s", data->enable_traps ? "enabled" : "disabled"); conf_write(fp, " SNMP socket = %s", data->snmp_socket ? data->snmp_socket : "default (unix:/var/agentx/master)"); #endif #ifdef _WITH_DBUS_ conf_write(fp, " DBus %s", data->enable_dbus ? "enabled" : "disabled"); conf_write(fp, " DBus service name = %s", data->dbus_service_name ? data->dbus_service_name : ""); #endif conf_write(fp, " Script security %s", script_security ? 
"enabled" : "disabled"); conf_write(fp, " Default script uid:gid %d:%d", default_script_uid, default_script_gid); #ifdef _WITH_VRRP_ conf_write(fp, " vrrp_netlink_cmd_rcv_bufs = %u", global_data->vrrp_netlink_cmd_rcv_bufs); conf_write(fp, " vrrp_netlink_cmd_rcv_bufs_force = %u", global_data->vrrp_netlink_cmd_rcv_bufs_force); conf_write(fp, " vrrp_netlink_monitor_rcv_bufs = %u", global_data->vrrp_netlink_monitor_rcv_bufs); conf_write(fp, " vrrp_netlink_monitor_rcv_bufs_force = %u", global_data->vrrp_netlink_monitor_rcv_bufs_force); #endif #ifdef _WITH_LVS_ conf_write(fp, " lvs_netlink_cmd_rcv_bufs = %u", global_data->lvs_netlink_cmd_rcv_bufs); conf_write(fp, " lvs_netlink_cmd_rcv_bufs_force = %u", global_data->lvs_netlink_cmd_rcv_bufs_force); conf_write(fp, " lvs_netlink_monitor_rcv_bufs = %u", global_data->lvs_netlink_monitor_rcv_bufs); conf_write(fp, " lvs_netlink_monitor_rcv_bufs_force = %u", global_data->lvs_netlink_monitor_rcv_bufs_force); conf_write(fp, " rs_init_notifies = %u", global_data->rs_init_notifies); conf_write(fp, " no_checker_emails = %u", global_data->no_checker_emails); #endif #ifdef _WITH_VRRP_ buf[0] = '\0'; if (global_data->vrrp_rx_bufs_policy & RX_BUFS_POLICY_MTU) strcpy(buf, " rx_bufs_policy = MTU"); else if (global_data->vrrp_rx_bufs_policy & RX_BUFS_POLICY_ADVERT) strcpy(buf, " rx_bufs_policy = ADVERT"); else if (global_data->vrrp_rx_bufs_policy & RX_BUFS_SIZE) sprintf(buf, " rx_bufs_size = %lu", global_data->vrrp_rx_bufs_size); if (buf[0]) conf_write(fp, "%s", buf); conf_write(fp, " rx_bufs_multiples = %u", global_data->vrrp_rx_bufs_multiples); #endif } Commit Message: Add command line and configuration option to set umask Issue #1048 identified that files created by keepalived are created with mode 0666. This commit changes the default to 0644, and also allows the umask to be specified in the configuration or as a command line option. Signed-off-by: Quentin Armitage <[email protected]> CWE ID: CWE-200
dump_global_data(FILE *fp, data_t * data) { #ifdef _WITH_VRRP_ char buf[64]; #endif if (!data) return; conf_write(fp, "------< Global definitions >------"); #if HAVE_DECL_CLONE_NEWNET conf_write(fp, " Network namespace = %s", data->network_namespace ? data->network_namespace : "(default)"); #endif if (data->instance_name) conf_write(fp, " Instance name = %s", data->instance_name); if (data->router_id) conf_write(fp, " Router ID = %s", data->router_id); if (data->smtp_server.ss_family) { conf_write(fp, " Smtp server = %s", inet_sockaddrtos(&data->smtp_server)); conf_write(fp, " Smtp server port = %u", ntohs(inet_sockaddrport(&data->smtp_server))); } if (data->smtp_helo_name) conf_write(fp, " Smtp HELO name = %s" , data->smtp_helo_name); if (data->smtp_connection_to) conf_write(fp, " Smtp server connection timeout = %lu" , data->smtp_connection_to / TIMER_HZ); if (data->email_from) { conf_write(fp, " Email notification from = %s" , data->email_from); dump_list(fp, data->email); } conf_write(fp, " Default smtp_alert = %s", data->smtp_alert == -1 ? "unset" : data->smtp_alert ? "on" : "off"); #ifdef _WITH_VRRP_ conf_write(fp, " Default smtp_alert_vrrp = %s", data->smtp_alert_vrrp == -1 ? "unset" : data->smtp_alert_vrrp ? "on" : "off"); #endif #ifdef _WITH_LVS_ conf_write(fp, " Default smtp_alert_checker = %s", data->smtp_alert_checker == -1 ? "unset" : data->smtp_alert_checker ? "on" : "off"); #endif #ifdef _WITH_VRRP_ conf_write(fp, " Dynamic interfaces = %s", data->dynamic_interfaces ? "true" : "false"); if (data->dynamic_interfaces) conf_write(fp, " Allow interface changes = %s", data->allow_if_changes ? "true" : "false"); if (data->no_email_faults) conf_write(fp, " Send emails for fault transitions = off"); #endif #ifdef _WITH_LVS_ if (data->lvs_tcp_timeout) conf_write(fp, " LVS TCP timeout = %d", data->lvs_tcp_timeout); if (data->lvs_tcpfin_timeout) conf_write(fp, " LVS TCP FIN timeout = %d", data->lvs_tcpfin_timeout); if (data->lvs_udp_timeout) conf_write(fp, " LVS TCP timeout = %d", data->lvs_udp_timeout); #ifdef _WITH_VRRP_ #ifndef _DEBUG_ if (prog_type == PROG_TYPE_VRRP) #endif conf_write(fp, " Default interface = %s", data->default_ifp ? data->default_ifp->ifname : DFLT_INT); if (data->lvs_syncd.vrrp) { conf_write(fp, " LVS syncd vrrp instance = %s" , data->lvs_syncd.vrrp->iname); if (data->lvs_syncd.ifname) conf_write(fp, " LVS syncd interface = %s" , data->lvs_syncd.ifname); conf_write(fp, " LVS syncd syncid = %u" , data->lvs_syncd.syncid); #ifdef _HAVE_IPVS_SYNCD_ATTRIBUTES_ if (data->lvs_syncd.sync_maxlen) conf_write(fp, " LVS syncd maxlen = %u", data->lvs_syncd.sync_maxlen); if (data->lvs_syncd.mcast_group.ss_family != AF_UNSPEC) conf_write(fp, " LVS mcast group %s", inet_sockaddrtos(&data->lvs_syncd.mcast_group)); if (data->lvs_syncd.mcast_port) conf_write(fp, " LVS syncd mcast port = %d", data->lvs_syncd.mcast_port); if (data->lvs_syncd.mcast_ttl) conf_write(fp, " LVS syncd mcast ttl = %u", data->lvs_syncd.mcast_ttl); #endif } #endif conf_write(fp, " LVS flush = %s", data->lvs_flush ? 
"true" : "false"); #endif if (data->notify_fifo.name) { conf_write(fp, " Global notify fifo = %s", data->notify_fifo.name); if (data->notify_fifo.script) conf_write(fp, " Global notify fifo script = %s, uid:gid %d:%d", cmd_str(data->notify_fifo.script), data->notify_fifo.script->uid, data->notify_fifo.script->gid); } #ifdef _WITH_VRRP_ if (data->vrrp_notify_fifo.name) { conf_write(fp, " VRRP notify fifo = %s", data->vrrp_notify_fifo.name); if (data->vrrp_notify_fifo.script) conf_write(fp, " VRRP notify fifo script = %s, uid:gid %d:%d", cmd_str(data->vrrp_notify_fifo.script), data->vrrp_notify_fifo.script->uid, data->vrrp_notify_fifo.script->gid); } #endif #ifdef _WITH_LVS_ if (data->lvs_notify_fifo.name) { conf_write(fp, " LVS notify fifo = %s", data->lvs_notify_fifo.name); if (data->lvs_notify_fifo.script) conf_write(fp, " LVS notify fifo script = %s, uid:gid %d:%d", cmd_str(data->lvs_notify_fifo.script), data->lvs_notify_fifo.script->uid, data->lvs_notify_fifo.script->gid); } #endif #ifdef _WITH_VRRP_ if (data->vrrp_mcast_group4.sin_family) { conf_write(fp, " VRRP IPv4 mcast group = %s" , inet_sockaddrtos((struct sockaddr_storage *)&data->vrrp_mcast_group4)); } if (data->vrrp_mcast_group6.sin6_family) { conf_write(fp, " VRRP IPv6 mcast group = %s" , inet_sockaddrtos((struct sockaddr_storage *)&data->vrrp_mcast_group6)); } conf_write(fp, " Gratuitous ARP delay = %u", data->vrrp_garp_delay/TIMER_HZ); conf_write(fp, " Gratuitous ARP repeat = %u", data->vrrp_garp_rep); conf_write(fp, " Gratuitous ARP refresh timer = %lu", data->vrrp_garp_refresh.tv_sec); conf_write(fp, " Gratuitous ARP refresh repeat = %d", data->vrrp_garp_refresh_rep); conf_write(fp, " Gratuitous ARP lower priority delay = %d", data->vrrp_garp_lower_prio_delay == PARAMETER_UNSET ? PARAMETER_UNSET : data->vrrp_garp_lower_prio_delay / TIMER_HZ); conf_write(fp, " Gratuitous ARP lower priority repeat = %d", data->vrrp_garp_lower_prio_rep); conf_write(fp, " Send advert after receive lower priority advert = %s", data->vrrp_lower_prio_no_advert ? "false" : "true"); conf_write(fp, " Send advert after receive higher priority advert = %s", data->vrrp_higher_prio_send_advert ? "true" : "false"); conf_write(fp, " Gratuitous ARP interval = %d", data->vrrp_garp_interval); conf_write(fp, " Gratuitous NA interval = %d", data->vrrp_gna_interval); conf_write(fp, " VRRP default protocol version = %d", data->vrrp_version); if (data->vrrp_iptables_inchain[0]) conf_write(fp," Iptables input chain = %s", data->vrrp_iptables_inchain); if (data->vrrp_iptables_outchain[0]) conf_write(fp," Iptables output chain = %s", data->vrrp_iptables_outchain); #ifdef _HAVE_LIBIPSET_ conf_write(fp, " Using ipsets = %s", data->using_ipsets ? "true" : "false"); if (data->vrrp_ipset_address[0]) conf_write(fp," ipset IPv4 address set = %s", data->vrrp_ipset_address); if (data->vrrp_ipset_address6[0]) conf_write(fp," ipset IPv6 address set = %s", data->vrrp_ipset_address6); if (data->vrrp_ipset_address_iface6[0]) conf_write(fp," ipset IPv6 address,iface set = %s", data->vrrp_ipset_address_iface6); #endif conf_write(fp, " VRRP check unicast_src = %s", data->vrrp_check_unicast_src ? "true" : "false"); conf_write(fp, " VRRP skip check advert addresses = %s", data->vrrp_skip_check_adv_addr ? "true" : "false"); conf_write(fp, " VRRP strict mode = %s", data->vrrp_strict ? "true" : "false"); conf_write(fp, " VRRP process priority = %d", data->vrrp_process_priority); conf_write(fp, " VRRP don't swap = %s", data->vrrp_no_swap ? 
"true" : "false"); #ifdef _HAVE_SCHED_RT_ conf_write(fp, " VRRP realtime priority = %u", data->vrrp_realtime_priority); #if HAVE_DECL_RLIMIT_RTTIME conf_write(fp, " VRRP realtime limit = %lu", data->vrrp_rlimit_rt); #endif #endif #endif #ifdef _WITH_LVS_ conf_write(fp, " Checker process priority = %d", data->checker_process_priority); conf_write(fp, " Checker don't swap = %s", data->checker_no_swap ? "true" : "false"); #ifdef _HAVE_SCHED_RT_ conf_write(fp, " Checker realtime priority = %u", data->checker_realtime_priority); #if HAVE_DECL_RLIMIT_RTTIME conf_write(fp, " Checker realtime limit = %lu", data->checker_rlimit_rt); #endif #endif #endif #ifdef _WITH_BFD_ conf_write(fp, " BFD process priority = %d", data->bfd_process_priority); conf_write(fp, " BFD don't swap = %s", data->bfd_no_swap ? "true" : "false"); #ifdef _HAVE_SCHED_RT_ conf_write(fp, " BFD realtime priority = %u", data->bfd_realtime_priority); #if HAVE_DECL_RLIMIT_RTTIME conf_write(fp, " BFD realtime limit = %lu", data->bfd_rlimit_rt); #endif #endif #endif #ifdef _WITH_SNMP_VRRP_ conf_write(fp, " SNMP vrrp %s", data->enable_snmp_vrrp ? "enabled" : "disabled"); #endif #ifdef _WITH_SNMP_CHECKER_ conf_write(fp, " SNMP checker %s", data->enable_snmp_checker ? "enabled" : "disabled"); #endif #ifdef _WITH_SNMP_RFCV2_ conf_write(fp, " SNMP RFCv2 %s", data->enable_snmp_rfcv2 ? "enabled" : "disabled"); #endif #ifdef _WITH_SNMP_RFCV3_ conf_write(fp, " SNMP RFCv3 %s", data->enable_snmp_rfcv3 ? "enabled" : "disabled"); #endif #ifdef _WITH_SNMP_ conf_write(fp, " SNMP traps %s", data->enable_traps ? "enabled" : "disabled"); conf_write(fp, " SNMP socket = %s", data->snmp_socket ? data->snmp_socket : "default (unix:/var/agentx/master)"); #endif #ifdef _WITH_DBUS_ conf_write(fp, " DBus %s", data->enable_dbus ? "enabled" : "disabled"); conf_write(fp, " DBus service name = %s", data->dbus_service_name ? data->dbus_service_name : ""); #endif conf_write(fp, " Script security %s", script_security ? 
"enabled" : "disabled"); conf_write(fp, " Default script uid:gid %d:%d", default_script_uid, default_script_gid); #ifdef _WITH_VRRP_ conf_write(fp, " vrrp_netlink_cmd_rcv_bufs = %u", global_data->vrrp_netlink_cmd_rcv_bufs); conf_write(fp, " vrrp_netlink_cmd_rcv_bufs_force = %u", global_data->vrrp_netlink_cmd_rcv_bufs_force); conf_write(fp, " vrrp_netlink_monitor_rcv_bufs = %u", global_data->vrrp_netlink_monitor_rcv_bufs); conf_write(fp, " vrrp_netlink_monitor_rcv_bufs_force = %u", global_data->vrrp_netlink_monitor_rcv_bufs_force); #endif #ifdef _WITH_LVS_ conf_write(fp, " lvs_netlink_cmd_rcv_bufs = %u", global_data->lvs_netlink_cmd_rcv_bufs); conf_write(fp, " lvs_netlink_cmd_rcv_bufs_force = %u", global_data->lvs_netlink_cmd_rcv_bufs_force); conf_write(fp, " lvs_netlink_monitor_rcv_bufs = %u", global_data->lvs_netlink_monitor_rcv_bufs); conf_write(fp, " lvs_netlink_monitor_rcv_bufs_force = %u", global_data->lvs_netlink_monitor_rcv_bufs_force); conf_write(fp, " rs_init_notifies = %u", global_data->rs_init_notifies); conf_write(fp, " no_checker_emails = %u", global_data->no_checker_emails); #endif #ifdef _WITH_VRRP_ buf[0] = '\0'; if (global_data->vrrp_rx_bufs_policy & RX_BUFS_POLICY_MTU) strcpy(buf, " rx_bufs_policy = MTU"); else if (global_data->vrrp_rx_bufs_policy & RX_BUFS_POLICY_ADVERT) strcpy(buf, " rx_bufs_policy = ADVERT"); else if (global_data->vrrp_rx_bufs_policy & RX_BUFS_SIZE) sprintf(buf, " rx_bufs_size = %lu", global_data->vrrp_rx_bufs_size); if (buf[0]) conf_write(fp, "%s", buf); conf_write(fp, " rx_bufs_multiples = %u", global_data->vrrp_rx_bufs_multiples); conf_write(fp, " umask = 0%o", global_data->umask); #endif }
168,980
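The keepalived fix above addresses CWE-200 by making the umask configurable and tightening the default so created files end up 0644 rather than 0666; the dumped function only shows the new umask value being logged. A minimal sketch of the underlying idea, restricting the mode of files the process creates, is shown below with an illustrative helper name.

/* Sketch only: create_state_file is a hypothetical helper, not a keepalived
 * function.  A 022 mask means a 0666 creation request yields 0644 on disk,
 * matching the tightened default described in the commit message. */
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

static int create_state_file(const char *path)
{
    umask(S_IWGRP | S_IWOTH);                          /* drop group/other write (022) */
    return open(path, O_CREAT | O_WRONLY | O_TRUNC, 0644);
}

In keepalived itself the mask comes from the configuration file or command line rather than being hard-coded as it is here.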
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: parse_rockridge(struct archive_read *a, struct file_info *file, const unsigned char *p, const unsigned char *end) { struct iso9660 *iso9660; iso9660 = (struct iso9660 *)(a->format->data); while (p + 4 <= end /* Enough space for another entry. */ && p[0] >= 'A' && p[0] <= 'Z' /* Sanity-check 1st char of name. */ && p[1] >= 'A' && p[1] <= 'Z' /* Sanity-check 2nd char of name. */ && p[2] >= 4 /* Sanity-check length. */ && p + p[2] <= end) { /* Sanity-check length. */ const unsigned char *data = p + 4; int data_length = p[2] - 4; int version = p[3]; switch(p[0]) { case 'C': if (p[1] == 'E') { if (version == 1 && data_length == 24) { /* * CE extension comprises: * 8 byte sector containing extension * 8 byte offset w/in above sector * 8 byte length of continuation */ int32_t location = archive_le32dec(data); file->ce_offset = archive_le32dec(data+8); file->ce_size = archive_le32dec(data+16); if (register_CE(a, location, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); } } else if (p[1] == 'L') { if (version == 1 && data_length == 8) { file->cl_offset = (uint64_t) iso9660->logical_block_size * (uint64_t)archive_le32dec(data); iso9660->seenRockridge = 1; } } break; case 'N': if (p[1] == 'M') { if (version == 1) { parse_rockridge_NM1(file, data, data_length); iso9660->seenRockridge = 1; } } break; case 'P': /* * PD extension is padding; * contents are always ignored. * * PL extension won't appear; * contents are always ignored. */ if (p[1] == 'N') { if (version == 1 && data_length == 16) { file->rdev = toi(data,4); file->rdev <<= 32; file->rdev |= toi(data + 8, 4); iso9660->seenRockridge = 1; } } else if (p[1] == 'X') { /* * PX extension comprises: * 8 bytes for mode, * 8 bytes for nlinks, * 8 bytes for uid, * 8 bytes for gid, * 8 bytes for inode. */ if (version == 1) { if (data_length >= 8) file->mode = toi(data, 4); if (data_length >= 16) file->nlinks = toi(data + 8, 4); if (data_length >= 24) file->uid = toi(data + 16, 4); if (data_length >= 32) file->gid = toi(data + 24, 4); if (data_length >= 40) file->number = toi(data + 32, 4); iso9660->seenRockridge = 1; } } break; case 'R': if (p[1] == 'E' && version == 1) { file->re = 1; iso9660->seenRockridge = 1; } else if (p[1] == 'R' && version == 1) { /* * RR extension comprises: * one byte flag value * This extension is obsolete, * so contents are always ignored. */ } break; case 'S': if (p[1] == 'L') { if (version == 1) { parse_rockridge_SL1(file, data, data_length); iso9660->seenRockridge = 1; } } else if (p[1] == 'T' && data_length == 0 && version == 1) { /* * ST extension marks end of this * block of SUSP entries. * * It allows SUSP to coexist with * non-SUSP uses of the System * Use Area by placing non-SUSP data * after SUSP data. */ iso9660->seenSUSP = 0; iso9660->seenRockridge = 0; return (ARCHIVE_OK); } break; case 'T': if (p[1] == 'F') { if (version == 1) { parse_rockridge_TF1(file, data, data_length); iso9660->seenRockridge = 1; } } break; case 'Z': if (p[1] == 'F') { if (version == 1) parse_rockridge_ZF1(file, data, data_length); } break; default: break; } p += p[2]; } return (ARCHIVE_OK); } Commit Message: iso9660: Fail when expected Rockridge extensions is missing A corrupted or malicious ISO9660 image can cause read_CE() to loop forever. read_CE() calls parse_rockridge(), expecting a Rockridge extension to be read. However, parse_rockridge() is structured as a while loop starting with a sanity check, and if the sanity check fails before the loop has run, the function returns ARCHIVE_OK without advancing the position in the file. 
This causes read_CE() to retry indefinitely. Make parse_rockridge() return ARCHIVE_WARN if it didn't read an extension. As someone with no real knowledge of the format, this seems more apt than ARCHIVE_FATAL, but both the call-sites escalate it to a fatal error immediately anyway. Found with a combination of AFL, afl-rb (FairFuzz) and qsym. CWE ID: CWE-400
parse_rockridge(struct archive_read *a, struct file_info *file, const unsigned char *p, const unsigned char *end) { struct iso9660 *iso9660; int entry_seen = 0; iso9660 = (struct iso9660 *)(a->format->data); while (p + 4 <= end /* Enough space for another entry. */ && p[0] >= 'A' && p[0] <= 'Z' /* Sanity-check 1st char of name. */ && p[1] >= 'A' && p[1] <= 'Z' /* Sanity-check 2nd char of name. */ && p[2] >= 4 /* Sanity-check length. */ && p + p[2] <= end) { /* Sanity-check length. */ const unsigned char *data = p + 4; int data_length = p[2] - 4; int version = p[3]; switch(p[0]) { case 'C': if (p[1] == 'E') { if (version == 1 && data_length == 24) { /* * CE extension comprises: * 8 byte sector containing extension * 8 byte offset w/in above sector * 8 byte length of continuation */ int32_t location = archive_le32dec(data); file->ce_offset = archive_le32dec(data+8); file->ce_size = archive_le32dec(data+16); if (register_CE(a, location, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); } } else if (p[1] == 'L') { if (version == 1 && data_length == 8) { file->cl_offset = (uint64_t) iso9660->logical_block_size * (uint64_t)archive_le32dec(data); iso9660->seenRockridge = 1; } } break; case 'N': if (p[1] == 'M') { if (version == 1) { parse_rockridge_NM1(file, data, data_length); iso9660->seenRockridge = 1; } } break; case 'P': /* * PD extension is padding; * contents are always ignored. * * PL extension won't appear; * contents are always ignored. */ if (p[1] == 'N') { if (version == 1 && data_length == 16) { file->rdev = toi(data,4); file->rdev <<= 32; file->rdev |= toi(data + 8, 4); iso9660->seenRockridge = 1; } } else if (p[1] == 'X') { /* * PX extension comprises: * 8 bytes for mode, * 8 bytes for nlinks, * 8 bytes for uid, * 8 bytes for gid, * 8 bytes for inode. */ if (version == 1) { if (data_length >= 8) file->mode = toi(data, 4); if (data_length >= 16) file->nlinks = toi(data + 8, 4); if (data_length >= 24) file->uid = toi(data + 16, 4); if (data_length >= 32) file->gid = toi(data + 24, 4); if (data_length >= 40) file->number = toi(data + 32, 4); iso9660->seenRockridge = 1; } } break; case 'R': if (p[1] == 'E' && version == 1) { file->re = 1; iso9660->seenRockridge = 1; } else if (p[1] == 'R' && version == 1) { /* * RR extension comprises: * one byte flag value * This extension is obsolete, * so contents are always ignored. */ } break; case 'S': if (p[1] == 'L') { if (version == 1) { parse_rockridge_SL1(file, data, data_length); iso9660->seenRockridge = 1; } } else if (p[1] == 'T' && data_length == 0 && version == 1) { /* * ST extension marks end of this * block of SUSP entries. * * It allows SUSP to coexist with * non-SUSP uses of the System * Use Area by placing non-SUSP data * after SUSP data. */ iso9660->seenSUSP = 0; iso9660->seenRockridge = 0; return (ARCHIVE_OK); } break; case 'T': if (p[1] == 'F') { if (version == 1) { parse_rockridge_TF1(file, data, data_length); iso9660->seenRockridge = 1; } } break; case 'Z': if (p[1] == 'F') { if (version == 1) parse_rockridge_ZF1(file, data, data_length); } break; default: break; } p += p[2]; entry_seen = 1; } if (entry_seen) return (ARCHIVE_OK); else { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Tried to parse Rockridge extensions, but none found"); return (ARCHIVE_WARN); } }
169,483
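The libarchive fix above breaks a CWE-400 infinite loop: read_CE() keeps calling parse_rockridge() as long as it reports success, so a loop that can return ARCHIVE_OK without consuming any input lets a crafted image spin forever. The entry_seen flag turns "parsed nothing" into a warning. A minimal sketch of that no-progress guard, with illustrative names and return codes, not libarchive's:

/* Sketch only: the caller is assumed to retry while PARSE_OK is returned,
 * which is what made the original zero-progress case loop forever. */
#define PARSE_OK    0
#define PARSE_WARN  (-1)

static int parse_entries(const unsigned char *p, const unsigned char *end)
{
    int entry_seen = 0;

    while (p + 4 <= end && p[2] >= 4 && p + p[2] <= end) {
        /* ...handle one entry of p[2] bytes here... */
        p += p[2];          /* always advances, because p[2] >= 4 */
        entry_seen = 1;
    }

    return entry_seen ? PARSE_OK : PARSE_WARN;   /* report "consumed nothing" */
}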
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int smacker_decode_tree(BitstreamContext *bc, HuffContext *hc, uint32_t prefix, int length) { if (!bitstream_read_bit(bc)) { // Leaf if(hc->current >= 256){ av_log(NULL, AV_LOG_ERROR, "Tree size exceeded!\n"); return AVERROR_INVALIDDATA; } if(length){ hc->bits[hc->current] = prefix; hc->lengths[hc->current] = length; } else { hc->bits[hc->current] = 0; hc->lengths[hc->current] = 0; } hc->values[hc->current] = bitstream_read(bc, 8); hc->current++; if(hc->maxlength < length) hc->maxlength = length; return 0; } else { //Node int r; length++; r = smacker_decode_tree(bc, hc, prefix, length); if(r) return r; return smacker_decode_tree(bc, hc, prefix | (1 << (length - 1)), length); } } Commit Message: smacker: add sanity check for length in smacker_decode_tree() Signed-off-by: Michael Niedermayer <[email protected]> Bug-Id: 1098 Cc: [email protected] Signed-off-by: Sean McGovern <[email protected]> CWE ID: CWE-119
static int smacker_decode_tree(BitstreamContext *bc, HuffContext *hc, uint32_t prefix, int length) { if (length > SMKTREE_DECODE_MAX_RECURSION) { av_log(NULL, AV_LOG_ERROR, "Maximum tree recursion level exceeded.\n"); return AVERROR_INVALIDDATA; } if (!bitstream_read_bit(bc)) { // Leaf if(hc->current >= 256){ av_log(NULL, AV_LOG_ERROR, "Tree size exceeded!\n"); return AVERROR_INVALIDDATA; } if(length){ hc->bits[hc->current] = prefix; hc->lengths[hc->current] = length; } else { hc->bits[hc->current] = 0; hc->lengths[hc->current] = 0; } hc->values[hc->current] = bitstream_read(bc, 8); hc->current++; if(hc->maxlength < length) hc->maxlength = length; return 0; } else { //Node int r; length++; r = smacker_decode_tree(bc, hc, prefix, length); if(r) return r; return smacker_decode_tree(bc, hc, prefix | (1 << (length - 1)), length); } }
167,671
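The smacker fix above bounds recursion depth (CWE-119): every "node" bit in the stream descends one level, so untrusted input otherwise controls the call-stack depth directly. A minimal sketch of the same cap on a bit-driven tree decoder follows; the depth limit value and the read_bit callback are illustrative, and only SMKTREE_DECODE_MAX_RECURSION is a name used in the real fix.

/* Sketch only: refuse to recurse past a fixed depth while reading a
 * Huffman-style tree from untrusted bits. */
#define MAX_TREE_DEPTH 32

static int decode_tree(int (*read_bit)(void *), void *ctx, int depth)
{
    if (depth > MAX_TREE_DEPTH)
        return -1;                                     /* refuse pathological trees */
    if (!read_bit(ctx))
        return 0;                                      /* leaf: read its symbol here */
    if (decode_tree(read_bit, ctx, depth + 1) < 0)     /* left subtree  */
        return -1;
    return decode_tree(read_bit, ctx, depth + 1);      /* right subtree */
}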
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int64 GetReceivedListPrefValue(size_t index) { return ListPrefInt64Value(*received_update_, index); } Commit Message: Added daily UMA for non-data-reduction-proxy data usage when the proxy is enabled. BUG=325325 Review URL: https://codereview.chromium.org/106113002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@239897 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-416
int64 GetReceivedListPrefValue(size_t index) { return received_.GetListPrefValue(index); }
171,324
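The row above is a CWE-416 case: the fixed function stops dereferencing the cached received_update_ pointer and instead asks the owning received_ member for the value at the point of use. A rough C analogue of that pattern, with hypothetical types, is to keep a handle to the owner and index into it when needed rather than caching an interior pointer that can dangle.

/* Sketch only: pref_list and get_value are hypothetical.  Resolve the value
 * through the owning container at the moment of use instead of holding a
 * pointer that dangles when the owner is replaced or freed. */
#include <stdint.h>
#include <stddef.h>

typedef struct { int64_t *items; size_t count; } pref_list;

static int64_t get_value(const pref_list *owner, size_t i)
{
    return i < owner->count ? owner->items[i] : 0;     /* bounds-checked, owner-resolved */
}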
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */ Commit Message: src/ : Move to a variable length header buffer Previously, the `psf->header` buffer was a fixed length specified by `SF_HEADER_LEN` which was set to `12292`. This was problematic for two reasons; this value was un-necessarily large for the majority of files and too small for some others. Now the size of the header buffer starts at 256 bytes and grows as necessary up to a maximum of 100k. CWE ID: CWE-119
sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc) { SF_PRIVATE *psf ; if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2) { sf_errno = SFE_SD2_FD_DISALLOWED ; return NULL ; } ; if ((psf = psf_allocate ()) == NULL) { sf_errno = SFE_MALLOC_FAILED ; return NULL ; } ; psf_init_files (psf) ; copy_filename (psf, "") ; psf->file.mode = mode ; psf_set_file (psf, fd) ; psf->is_pipe = psf_is_pipe (psf) ; psf->fileoffset = psf_ftell (psf) ; if (! close_desc) psf->file.do_not_close_descriptor = SF_TRUE ; return psf_open_file (psf, sfinfo) ; } /* sf_open_fd */
170,068
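The libsndfile fix above replaces the fixed SF_HEADER_LEN buffer with one that starts at 256 bytes and grows on demand up to 100k (CWE-119). A minimal sketch of one plausible growth policy follows; the initial size and ceiling come from the commit message, while the geometric doubling, the struct, and the function names are assumptions rather than libsndfile's actual implementation.

/* Sketch only: header_buf and header_reserve are illustrative names. */
#include <stdlib.h>

#define HEADER_INITIAL  256
#define HEADER_MAX      (100 * 1024)

typedef struct { unsigned char *ptr; size_t len, used; } header_buf;

static int header_reserve(header_buf *h, size_t needed)
{
    size_t newlen = h->len ? h->len : HEADER_INITIAL;
    unsigned char *p;

    if (needed <= h->len)
        return 0;                      /* already large enough */
    if (needed > HEADER_MAX)
        return -1;                     /* oversized header: treat the file as corrupt */
    while (newlen < needed)
        newlen *= 2;                   /* grow geometrically */
    if (newlen > HEADER_MAX)
        newlen = HEADER_MAX;
    p = realloc(h->ptr, newlen);
    if (p == NULL)
        return -1;
    h->ptr = p;
    h->len = newlen;
    return 0;
}

Growing geometrically keeps the number of reallocations logarithmic in the final header size, while the hard ceiling still rejects absurdly large headers.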
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int phar_get_entry_data(phar_entry_data **ret, char *fname, int fname_len, char *path, int path_len, char *mode, char allow_dir, char **error, int security TSRMLS_DC) /* {{{ */ { phar_archive_data *phar; phar_entry_info *entry; int for_write = mode[0] != 'r' || mode[1] == '+'; int for_append = mode[0] == 'a'; int for_create = mode[0] != 'r'; int for_trunc = mode[0] == 'w'; if (!ret) { return FAILURE; } *ret = NULL; if (error) { *error = NULL; } if (FAILURE == phar_get_archive(&phar, fname, fname_len, NULL, 0, error TSRMLS_CC)) { return FAILURE; } if (for_write && PHAR_G(readonly) && !phar->is_data) { if (error) { spprintf(error, 4096, "phar error: file \"%s\" in phar \"%s\" cannot be opened for writing, disabled by ini setting", path, fname); } return FAILURE; } if (!path_len) { if (error) { spprintf(error, 4096, "phar error: file \"\" in phar \"%s\" cannot be empty", fname); } return FAILURE; } really_get_entry: if (allow_dir) { if ((entry = phar_get_entry_info_dir(phar, path, path_len, allow_dir, for_create && !PHAR_G(readonly) && !phar->is_data ? NULL : error, security TSRMLS_CC)) == NULL) { if (for_create && (!PHAR_G(readonly) || phar->is_data)) { return SUCCESS; } return FAILURE; } } else { if ((entry = phar_get_entry_info(phar, path, path_len, for_create && !PHAR_G(readonly) && !phar->is_data ? NULL : error, security TSRMLS_CC)) == NULL) { if (for_create && (!PHAR_G(readonly) || phar->is_data)) { return SUCCESS; } return FAILURE; } } if (for_write && phar->is_persistent) { if (FAILURE == phar_copy_on_write(&phar TSRMLS_CC)) { if (error) { spprintf(error, 4096, "phar error: file \"%s\" in phar \"%s\" cannot be opened for writing, could not make cached phar writeable", path, fname); } return FAILURE; } else { goto really_get_entry; } } if (entry->is_modified && !for_write) { if (error) { spprintf(error, 4096, "phar error: file \"%s\" in phar \"%s\" cannot be opened for reading, writable file pointers are open", path, fname); } return FAILURE; } if (entry->fp_refcount && for_write) { if (error) { spprintf(error, 4096, "phar error: file \"%s\" in phar \"%s\" cannot be opened for writing, readable file pointers are open", path, fname); } return FAILURE; } if (entry->is_deleted) { if (!for_create) { return FAILURE; } entry->is_deleted = 0; } if (entry->is_dir) { *ret = (phar_entry_data *) emalloc(sizeof(phar_entry_data)); (*ret)->position = 0; (*ret)->fp = NULL; (*ret)->phar = phar; (*ret)->for_write = for_write; (*ret)->internal_file = entry; (*ret)->is_zip = entry->is_zip; (*ret)->is_tar = entry->is_tar; if (!phar->is_persistent) { ++(entry->phar->refcount); ++(entry->fp_refcount); } return SUCCESS; } if (entry->fp_type == PHAR_MOD) { if (for_trunc) { if (FAILURE == phar_create_writeable_entry(phar, entry, error TSRMLS_CC)) { return FAILURE; } } else if (for_append) { phar_seek_efp(entry, 0, SEEK_END, 0, 0 TSRMLS_CC); } } else { if (for_write) { if (entry->link) { efree(entry->link); entry->link = NULL; entry->tar_type = (entry->is_tar ? 
TAR_FILE : '\0'); } if (for_trunc) { if (FAILURE == phar_create_writeable_entry(phar, entry, error TSRMLS_CC)) { return FAILURE; } } else { if (FAILURE == phar_separate_entry_fp(entry, error TSRMLS_CC)) { return FAILURE; } } } else { if (FAILURE == phar_open_entry_fp(entry, error, 1 TSRMLS_CC)) { return FAILURE; } } } *ret = (phar_entry_data *) emalloc(sizeof(phar_entry_data)); (*ret)->position = 0; (*ret)->phar = phar; (*ret)->for_write = for_write; (*ret)->internal_file = entry; (*ret)->is_zip = entry->is_zip; (*ret)->is_tar = entry->is_tar; (*ret)->fp = phar_get_efp(entry, 1 TSRMLS_CC); if (entry->link) { (*ret)->zero = phar_get_fp_offset(phar_get_link_source(entry TSRMLS_CC) TSRMLS_CC); } else { (*ret)->zero = phar_get_fp_offset(entry TSRMLS_CC); } } return SUCCESS; } /* }}} */ /** * Create a new dummy file slot within a writeable phar for a newly created file */ phar_entry_data *phar_get_or_create_entry_data(char *fname, int fname_len, char *path, int path_len, char *mode, char allow_dir, char **error, int security TSRMLS_DC) /* {{{ */ { phar_archive_data *phar; phar_entry_info *entry, etemp; phar_entry_data *ret; const char *pcr_error; char is_dir; #ifdef PHP_WIN32 phar_unixify_path_separators(path, path_len); #endif is_dir = (path_len && path[path_len - 1] == '/') ? 1 : 0; if (FAILURE == phar_get_archive(&phar, fname, fname_len, NULL, 0, error TSRMLS_CC)) { return NULL; } if (FAILURE == phar_get_entry_data(&ret, fname, fname_len, path, path_len, mode, allow_dir, error, security TSRMLS_CC)) { return NULL; } else if (ret) { return ret; } if (phar_path_check(&path, &path_len, &pcr_error) > pcr_is_ok) { if (error) { spprintf(error, 0, "phar error: invalid path \"%s\" contains %s", path, pcr_error); } return NULL; } if (phar->is_persistent && FAILURE == phar_copy_on_write(&phar TSRMLS_CC)) { if (error) { spprintf(error, 4096, "phar error: file \"%s\" in phar \"%s\" cannot be created, could not make cached phar writeable", path, fname); } return NULL; } /* create a new phar data holder */ ret = (phar_entry_data *) emalloc(sizeof(phar_entry_data)); /* create an entry, this is a new file */ memset(&etemp, 0, sizeof(phar_entry_info)); etemp.filename_len = path_len; etemp.fp_type = PHAR_MOD; etemp.fp = php_stream_fopen_tmpfile(); if (!etemp.fp) { if (error) { spprintf(error, 0, "phar error: unable to create temporary file"); } efree(ret); return NULL; } etemp.fp_refcount = 1; if (allow_dir == 2) { etemp.is_dir = 1; etemp.flags = etemp.old_flags = PHAR_ENT_PERM_DEF_DIR; } else { etemp.flags = etemp.old_flags = PHAR_ENT_PERM_DEF_FILE; } if (is_dir) { etemp.filename_len--; /* strip trailing / */ path_len--; } phar_add_virtual_dirs(phar, path, path_len TSRMLS_CC); etemp.is_modified = 1; etemp.timestamp = time(0); etemp.is_crc_checked = 1; etemp.phar = phar; etemp.filename = estrndup(path, path_len); etemp.is_zip = phar->is_zip; if (phar->is_tar) { etemp.is_tar = phar->is_tar; etemp.tar_type = etemp.is_dir ? 
TAR_DIR : TAR_FILE; } if (FAILURE == zend_hash_add(&phar->manifest, etemp.filename, path_len, (void*)&etemp, sizeof(phar_entry_info), (void **) &entry)) { php_stream_close(etemp.fp); if (error) { spprintf(error, 0, "phar error: unable to add new entry \"%s\" to phar \"%s\"", etemp.filename, phar->fname); } efree(ret); efree(etemp.filename); return NULL; } if (!entry) { php_stream_close(etemp.fp); efree(etemp.filename); efree(ret); return NULL; } ++(phar->refcount); ret->phar = phar; ret->fp = entry->fp; ret->position = ret->zero = 0; ret->for_write = 1; ret->is_zip = entry->is_zip; ret->is_tar = entry->is_tar; ret->internal_file = entry; return ret; } /* }}} */ /* initialize a phar_archive_data's read-only fp for existing phar data */ int phar_open_archive_fp(phar_archive_data *phar TSRMLS_DC) /* {{{ */ { if (phar_get_pharfp(phar TSRMLS_CC)) { return SUCCESS; } if (php_check_open_basedir(phar->fname TSRMLS_CC)) { return FAILURE; } phar_set_pharfp(phar, php_stream_open_wrapper(phar->fname, "rb", IGNORE_URL|STREAM_MUST_SEEK|0, NULL) TSRMLS_CC); if (!phar_get_pharfp(phar TSRMLS_CC)) { return FAILURE; } return SUCCESS; } /* }}} */ /* copy file data from an existing to a new phar_entry_info that is not in the manifest */ int phar_copy_entry_fp(phar_entry_info *source, phar_entry_info *dest, char **error TSRMLS_DC) /* {{{ */ { phar_entry_info *link; if (FAILURE == phar_open_entry_fp(source, error, 1 TSRMLS_CC)) { return FAILURE; } if (dest->link) { efree(dest->link); dest->link = NULL; dest->tar_type = (dest->is_tar ? TAR_FILE : '\0'); } dest->fp_type = PHAR_MOD; dest->offset = 0; dest->is_modified = 1; dest->fp = php_stream_fopen_tmpfile(); if (dest->fp == NULL) { spprintf(error, 0, "phar error: unable to create temporary file"); return EOF; } phar_seek_efp(source, 0, SEEK_SET, 0, 1 TSRMLS_CC); link = phar_get_link_source(source TSRMLS_CC); if (!link) { link = source; } if (SUCCESS != phar_stream_copy_to_stream(phar_get_efp(link, 0 TSRMLS_CC), dest->fp, link->uncompressed_filesize, NULL)) { php_stream_close(dest->fp); dest->fp_type = PHAR_FP; if (error) { spprintf(error, 4096, "phar error: unable to copy contents of file \"%s\" to \"%s\" in phar archive \"%s\"", source->filename, dest->filename, source->phar->fname); } return FAILURE; } return SUCCESS; } /* }}} */ /* open and decompress a compressed phar entry */ int phar_open_entry_fp(phar_entry_info *entry, char **error, int follow_links TSRMLS_DC) /* {{{ */ { php_stream_filter *filter; phar_archive_data *phar = entry->phar; char *filtername; off_t loc; php_stream *ufp; phar_entry_data dummy; if (follow_links && entry->link) { phar_entry_info *link_entry = phar_get_link_source(entry TSRMLS_CC); if (link_entry && link_entry != entry) { return phar_open_entry_fp(link_entry, error, 1 TSRMLS_CC); } } if (entry->is_modified) { return SUCCESS; } if (entry->fp_type == PHAR_TMP) { if (!entry->fp) { entry->fp = php_stream_open_wrapper(entry->tmp, "rb", STREAM_MUST_SEEK|0, NULL); } return SUCCESS; } if (entry->fp_type != PHAR_FP) { /* either newly created or already modified */ return SUCCESS; } if (!phar_get_pharfp(phar TSRMLS_CC)) { if (FAILURE == phar_open_archive_fp(phar TSRMLS_CC)) { spprintf(error, 4096, "phar error: Cannot open phar archive \"%s\" for reading", phar->fname); return FAILURE; } } if ((entry->old_flags && !(entry->old_flags & PHAR_ENT_COMPRESSION_MASK)) || !(entry->flags & PHAR_ENT_COMPRESSION_MASK)) { dummy.internal_file = entry; dummy.phar = phar; dummy.zero = entry->offset; dummy.fp = phar_get_pharfp(phar TSRMLS_CC); if (FAILURE 
== phar_postprocess_file(&dummy, entry->crc32, error, 1 TSRMLS_CC)) { return FAILURE; } return SUCCESS; } if (!phar_get_entrypufp(entry TSRMLS_CC)) { phar_set_entrypufp(entry, php_stream_fopen_tmpfile() TSRMLS_CC); if (!phar_get_entrypufp(entry TSRMLS_CC)) { spprintf(error, 4096, "phar error: Cannot open temporary file for decompressing phar archive \"%s\" file \"%s\"", phar->fname, entry->filename); return FAILURE; } } dummy.internal_file = entry; dummy.phar = phar; dummy.zero = entry->offset; dummy.fp = phar_get_pharfp(phar TSRMLS_CC); if (FAILURE == phar_postprocess_file(&dummy, entry->crc32, error, 1 TSRMLS_CC)) { return FAILURE; } ufp = phar_get_entrypufp(entry TSRMLS_CC); if ((filtername = phar_decompress_filter(entry, 0)) != NULL) { filter = php_stream_filter_create(filtername, NULL, 0 TSRMLS_CC); } else { filter = NULL; } if (!filter) { spprintf(error, 4096, "phar error: unable to read phar \"%s\" (cannot create %s filter while decompressing file \"%s\")", phar->fname, phar_decompress_filter(entry, 1), entry->filename); return FAILURE; } /* now we can safely use proper decompression */ /* save the new offset location within ufp */ php_stream_seek(ufp, 0, SEEK_END); loc = php_stream_tell(ufp); php_stream_filter_append(&ufp->writefilters, filter); php_stream_seek(phar_get_entrypfp(entry TSRMLS_CC), phar_get_fp_offset(entry TSRMLS_CC), SEEK_SET); if (entry->uncompressed_filesize) { if (SUCCESS != phar_stream_copy_to_stream(phar_get_entrypfp(entry TSRMLS_CC), ufp, entry->compressed_filesize, NULL)) { spprintf(error, 4096, "phar error: internal corruption of phar \"%s\" (actual filesize mismatch on file \"%s\")", phar->fname, entry->filename); php_stream_filter_remove(filter, 1 TSRMLS_CC); return FAILURE; } } php_stream_filter_flush(filter, 1); php_stream_flush(ufp); php_stream_filter_remove(filter, 1 TSRMLS_CC); if (php_stream_tell(ufp) - loc != (off_t) entry->uncompressed_filesize) { spprintf(error, 4096, "phar error: internal corruption of phar \"%s\" (actual filesize mismatch on file \"%s\")", phar->fname, entry->filename); return FAILURE; } entry->old_flags = entry->flags; /* this is now the new location of the file contents within this fp */ phar_set_fp_type(entry, PHAR_UFP, loc TSRMLS_CC); dummy.zero = entry->offset; dummy.fp = ufp; if (FAILURE == phar_postprocess_file(&dummy, entry->crc32, error, 0 TSRMLS_CC)) { return FAILURE; } return SUCCESS; } /* }}} */ int phar_create_writeable_entry(phar_archive_data *phar, phar_entry_info *entry, char **error TSRMLS_DC) /* {{{ */ { if (entry->fp_type == PHAR_MOD) { /* already newly created, truncate */ php_stream_truncate_set_size(entry->fp, 0); entry->old_flags = entry->flags; entry->is_modified = 1; phar->is_modified = 1; /* reset file size */ entry->uncompressed_filesize = 0; entry->compressed_filesize = 0; entry->crc32 = 0; entry->flags = PHAR_ENT_PERM_DEF_FILE; entry->fp_type = PHAR_MOD; entry->offset = 0; return SUCCESS; } if (error) { *error = NULL; } /* open a new temp file for writing */ if (entry->link) { efree(entry->link); entry->link = NULL; entry->tar_type = (entry->is_tar ? 
TAR_FILE : '\0'); } entry->fp = php_stream_fopen_tmpfile(); if (!entry->fp) { if (error) { spprintf(error, 0, "phar error: unable to create temporary file"); } return FAILURE; } entry->old_flags = entry->flags; entry->is_modified = 1; phar->is_modified = 1; /* reset file size */ entry->uncompressed_filesize = 0; entry->compressed_filesize = 0; entry->crc32 = 0; entry->flags = PHAR_ENT_PERM_DEF_FILE; entry->fp_type = PHAR_MOD; entry->offset = 0; return SUCCESS; } /* }}} */ int phar_separate_entry_fp(phar_entry_info *entry, char **error TSRMLS_DC) /* {{{ */ { php_stream *fp; phar_entry_info *link; if (FAILURE == phar_open_entry_fp(entry, error, 1 TSRMLS_CC)) { return FAILURE; } if (entry->fp_type == PHAR_MOD) { return SUCCESS; } fp = php_stream_fopen_tmpfile(); if (fp == NULL) { spprintf(error, 0, "phar error: unable to create temporary file"); return FAILURE; } phar_seek_efp(entry, 0, SEEK_SET, 0, 1 TSRMLS_CC); link = phar_get_link_source(entry TSRMLS_CC); if (!link) { link = entry; } if (SUCCESS != phar_stream_copy_to_stream(phar_get_efp(link, 0 TSRMLS_CC), fp, link->uncompressed_filesize, NULL)) { if (error) { spprintf(error, 4096, "phar error: cannot separate entry file \"%s\" contents in phar archive \"%s\" for write access", entry->filename, entry->phar->fname); } return FAILURE; } if (entry->link) { efree(entry->link); entry->link = NULL; entry->tar_type = (entry->is_tar ? TAR_FILE : '\0'); } entry->offset = 0; entry->fp = fp; entry->fp_type = PHAR_MOD; entry->is_modified = 1; return SUCCESS; } /* }}} */ /** * helper function to open an internal file's fp just-in-time */ phar_entry_info * phar_open_jit(phar_archive_data *phar, phar_entry_info *entry, char **error TSRMLS_DC) /* {{{ */ { if (error) { *error = NULL; } /* seek to start of internal file and read it */ if (FAILURE == phar_open_entry_fp(entry, error, 1 TSRMLS_CC)) { return NULL; } if (-1 == phar_seek_efp(entry, 0, SEEK_SET, 0, 1 TSRMLS_CC)) { spprintf(error, 4096, "phar error: cannot seek to start of file \"%s\" in phar \"%s\"", entry->filename, phar->fname); return NULL; } return entry; } /* }}} */ PHP_PHAR_API int phar_resolve_alias(char *alias, int alias_len, char **filename, int *filename_len TSRMLS_DC) /* {{{ */ { phar_archive_data **fd_ptr; if (PHAR_GLOBALS->phar_alias_map.arBuckets && SUCCESS == zend_hash_find(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, (void**)&fd_ptr)) { *filename = (*fd_ptr)->fname; *filename_len = (*fd_ptr)->fname_len; return SUCCESS; } return FAILURE; } /* }}} */ int phar_free_alias(phar_archive_data *phar, char *alias, int alias_len TSRMLS_DC) /* {{{ */ { if (phar->refcount || phar->is_persistent) { return FAILURE; } /* this archive has no open references, so emit an E_STRICT and remove it */ if (zend_hash_del(&(PHAR_GLOBALS->phar_fname_map), phar->fname, phar->fname_len) != SUCCESS) { return FAILURE; } /* invalidate phar cache */ PHAR_G(last_phar) = NULL; PHAR_G(last_phar_name) = PHAR_G(last_alias) = NULL; return SUCCESS; } /* }}} */ /** * Looks up a phar archive in the filename map, connecting it to the alias * (if any) or returns null */ int phar_get_archive(phar_archive_data **archive, char *fname, int fname_len, char *alias, int alias_len, char **error TSRMLS_DC) /* {{{ */ { phar_archive_data *fd, **fd_ptr; char *my_realpath, *save; int save_len; ulong fhash, ahash = 0; phar_request_initialize(TSRMLS_C); if (error) { *error = NULL; } *archive = NULL; if (PHAR_G(last_phar) && fname_len == PHAR_G(last_phar_name_len) && !memcmp(fname, PHAR_G(last_phar_name), fname_len)) { *archive = 
PHAR_G(last_phar); if (alias && alias_len) { if (!PHAR_G(last_phar)->is_temporary_alias && (alias_len != PHAR_G(last_phar)->alias_len || memcmp(PHAR_G(last_phar)->alias, alias, alias_len))) { if (error) { spprintf(error, 0, "alias \"%s\" is already used for archive \"%s\" cannot be overloaded with \"%s\"", alias, PHAR_G(last_phar)->fname, fname); } *archive = NULL; return FAILURE; } if (PHAR_G(last_phar)->alias_len && SUCCESS == zend_hash_find(&(PHAR_GLOBALS->phar_alias_map), PHAR_G(last_phar)->alias, PHAR_G(last_phar)->alias_len, (void**)&fd_ptr)) { zend_hash_del(&(PHAR_GLOBALS->phar_alias_map), PHAR_G(last_phar)->alias, PHAR_G(last_phar)->alias_len); } zend_hash_add(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, (void*)&(*archive), sizeof(phar_archive_data*), NULL); PHAR_G(last_alias) = alias; PHAR_G(last_alias_len) = alias_len; } return SUCCESS; } if (alias && alias_len && PHAR_G(last_phar) && alias_len == PHAR_G(last_alias_len) && !memcmp(alias, PHAR_G(last_alias), alias_len)) { fd = PHAR_G(last_phar); fd_ptr = &fd; goto alias_success; } if (alias && alias_len) { ahash = zend_inline_hash_func(alias, alias_len); if (SUCCESS == zend_hash_quick_find(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, ahash, (void**)&fd_ptr)) { alias_success: if (fname && (fname_len != (*fd_ptr)->fname_len || strncmp(fname, (*fd_ptr)->fname, fname_len))) { if (error) { spprintf(error, 0, "alias \"%s\" is already used for archive \"%s\" cannot be overloaded with \"%s\"", alias, (*fd_ptr)->fname, fname); } if (SUCCESS == phar_free_alias(*fd_ptr, alias, alias_len TSRMLS_CC)) { if (error) { efree(*error); *error = NULL; } } return FAILURE; } *archive = *fd_ptr; fd = *fd_ptr; PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = alias; PHAR_G(last_alias_len) = alias_len; return SUCCESS; } if (PHAR_G(manifest_cached) && SUCCESS == zend_hash_quick_find(&cached_alias, alias, alias_len, ahash, (void **)&fd_ptr)) { goto alias_success; } } fhash = zend_inline_hash_func(fname, fname_len); my_realpath = NULL; save = fname; save_len = fname_len; if (fname && fname_len) { if (SUCCESS == zend_hash_quick_find(&(PHAR_GLOBALS->phar_fname_map), fname, fname_len, fhash, (void**)&fd_ptr)) { *archive = *fd_ptr; fd = *fd_ptr; if (alias && alias_len) { if (!fd->is_temporary_alias && (alias_len != fd->alias_len || memcmp(fd->alias, alias, alias_len))) { if (error) { spprintf(error, 0, "alias \"%s\" is already used for archive \"%s\" cannot be overloaded with \"%s\"", alias, (*fd_ptr)->fname, fname); } return FAILURE; } if (fd->alias_len && SUCCESS == zend_hash_find(&(PHAR_GLOBALS->phar_alias_map), fd->alias, fd->alias_len, (void**)&fd_ptr)) { zend_hash_del(&(PHAR_GLOBALS->phar_alias_map), fd->alias, fd->alias_len); } zend_hash_quick_add(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, ahash, (void*)&fd, sizeof(phar_archive_data*), NULL); } PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = fd->alias; PHAR_G(last_alias_len) = fd->alias_len; return SUCCESS; } if (PHAR_G(manifest_cached) && SUCCESS == zend_hash_quick_find(&cached_phars, fname, fname_len, fhash, (void**)&fd_ptr)) { *archive = *fd_ptr; fd = *fd_ptr; /* this could be problematic - alias should never be different from manifest alias for cached phars */ if (!fd->is_temporary_alias && alias && alias_len) { if (alias_len != fd->alias_len || memcmp(fd->alias, alias, alias_len)) { if (error) { spprintf(error, 0, "alias \"%s\" is 
already used for archive \"%s\" cannot be overloaded with \"%s\"", alias, (*fd_ptr)->fname, fname); } return FAILURE; } } PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = fd->alias; PHAR_G(last_alias_len) = fd->alias_len; return SUCCESS; } if (SUCCESS == zend_hash_quick_find(&(PHAR_GLOBALS->phar_alias_map), save, save_len, fhash, (void**)&fd_ptr)) { fd = *archive = *fd_ptr; PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = fd->alias; PHAR_G(last_alias_len) = fd->alias_len; return SUCCESS; } if (PHAR_G(manifest_cached) && SUCCESS == zend_hash_quick_find(&cached_alias, save, save_len, fhash, (void**)&fd_ptr)) { fd = *archive = *fd_ptr; PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = fd->alias; PHAR_G(last_alias_len) = fd->alias_len; return SUCCESS; } /* not found, try converting \ to / */ my_realpath = expand_filepath(fname, my_realpath TSRMLS_CC); if (my_realpath) { fname_len = strlen(my_realpath); fname = my_realpath; } else { return FAILURE; } #ifdef PHP_WIN32 phar_unixify_path_separators(fname, fname_len); #endif fhash = zend_inline_hash_func(fname, fname_len); if (SUCCESS == zend_hash_quick_find(&(PHAR_GLOBALS->phar_fname_map), fname, fname_len, fhash, (void**)&fd_ptr)) { realpath_success: *archive = *fd_ptr; fd = *fd_ptr; if (alias && alias_len) { zend_hash_quick_add(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, ahash, (void*)&fd, sizeof(phar_archive_data*), NULL); } efree(my_realpath); PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = fd->alias; PHAR_G(last_alias_len) = fd->alias_len; return SUCCESS; } if (PHAR_G(manifest_cached) && SUCCESS == zend_hash_quick_find(&cached_phars, fname, fname_len, fhash, (void**)&fd_ptr)) { goto realpath_success; } efree(my_realpath); } return FAILURE; } /* }}} */ /** * Determine which stream compression filter (if any) we need to read this file */ char * phar_compress_filter(phar_entry_info * entry, int return_unknown) /* {{{ */ { switch (entry->flags & PHAR_ENT_COMPRESSION_MASK) { case PHAR_ENT_COMPRESSED_GZ: return "zlib.deflate"; case PHAR_ENT_COMPRESSED_BZ2: return "bzip2.compress"; default: return return_unknown ? "unknown" : NULL; } } /* }}} */ /** * Determine which stream decompression filter (if any) we need to read this file */ char * phar_decompress_filter(phar_entry_info * entry, int return_unknown) /* {{{ */ { php_uint32 flags; if (entry->is_modified) { flags = entry->old_flags; } else { flags = entry->flags; } switch (flags & PHAR_ENT_COMPRESSION_MASK) { case PHAR_ENT_COMPRESSED_GZ: return "zlib.inflate"; case PHAR_ENT_COMPRESSED_BZ2: return "bzip2.decompress"; default: return return_unknown ? 
"unknown" : NULL; } } /* }}} */ /** * retrieve information on a file contained within a phar, or null if it ain't there */ phar_entry_info *phar_get_entry_info(phar_archive_data *phar, char *path, int path_len, char **error, int security TSRMLS_DC) /* {{{ */ { return phar_get_entry_info_dir(phar, path, path_len, 0, error, security TSRMLS_CC); } /* }}} */ /** * retrieve information on a file or directory contained within a phar, or null if none found * allow_dir is 0 for none, 1 for both empty directories in the phar and temp directories, and 2 for only * valid pre-existing empty directory entries */ phar_entry_info *phar_get_entry_info_dir(phar_archive_data *phar, char *path, int path_len, char dir, char **error, int security TSRMLS_DC) /* {{{ */ { const char *pcr_error; phar_entry_info *entry; int is_dir; #ifdef PHP_WIN32 phar_unixify_path_separators(path, path_len); #endif is_dir = (path_len && (path[path_len - 1] == '/')) ? 1 : 0; if (error) { *error = NULL; } if (security && path_len >= sizeof(".phar")-1 && !memcmp(path, ".phar", sizeof(".phar")-1)) { if (error) { spprintf(error, 4096, "phar error: cannot directly access magic \".phar\" directory or files within it"); } return NULL; } if (!path_len && !dir) { if (error) { spprintf(error, 4096, "phar error: invalid path \"%s\" must not be empty", path); } return NULL; } if (phar_path_check(&path, &path_len, &pcr_error) > pcr_is_ok) { if (error) { spprintf(error, 4096, "phar error: invalid path \"%s\" contains %s", path, pcr_error); } return NULL; } if (!phar->manifest.arBuckets) { return NULL; } if (is_dir) { if (!path_len || path_len == 1) { return NULL; } path_len--; } if (SUCCESS == zend_hash_find(&phar->manifest, path, path_len, (void**)&entry)) { if (entry->is_deleted) { /* entry is deleted, but has not been flushed to disk yet */ return NULL; } if (entry->is_dir && !dir) { if (error) { spprintf(error, 4096, "phar error: path \"%s\" is a directory", path); } return NULL; } if (!entry->is_dir && dir == 2) { /* user requested a directory, we must return one */ if (error) { spprintf(error, 4096, "phar error: path \"%s\" exists and is a not a directory", path); } return NULL; } return entry; } if (dir) { if (zend_hash_exists(&phar->virtual_dirs, path, path_len)) { /* a file or directory exists in a sub-directory of this path */ entry = (phar_entry_info *) ecalloc(1, sizeof(phar_entry_info)); /* this next line tells PharFileInfo->__destruct() to efree the filename */ entry->is_temp_dir = entry->is_dir = 1; entry->filename = (char *) estrndup(path, path_len + 1); entry->filename_len = path_len; entry->phar = phar; return entry; } } if (phar->mounted_dirs.arBuckets && zend_hash_num_elements(&phar->mounted_dirs)) { phar_zstr key; char *str_key; ulong unused; uint keylen; zend_hash_internal_pointer_reset(&phar->mounted_dirs); while (FAILURE != zend_hash_has_more_elements(&phar->mounted_dirs)) { if (HASH_KEY_NON_EXISTENT == zend_hash_get_current_key_ex(&phar->mounted_dirs, &key, &keylen, &unused, 0, NULL)) { break; } PHAR_STR(key, str_key); if ((int)keylen >= path_len || strncmp(str_key, path, keylen)) { PHAR_STR_FREE(str_key); continue; } else { char *test; int test_len; php_stream_statbuf ssb; if (SUCCESS != zend_hash_find(&phar->manifest, str_key, keylen, (void **) &entry)) { if (error) { spprintf(error, 4096, "phar internal error: mounted path \"%s\" could not be retrieved from manifest", str_key); } PHAR_STR_FREE(str_key); return NULL; } if (!entry->tmp || !entry->is_mounted) { if (error) { spprintf(error, 4096, "phar internal error: 
mounted path \"%s\" is not properly initialized as a mounted path", str_key); } PHAR_STR_FREE(str_key); return NULL; } PHAR_STR_FREE(str_key); test_len = spprintf(&test, MAXPATHLEN, "%s%s", entry->tmp, path + keylen); if (SUCCESS != php_stream_stat_path(test, &ssb)) { efree(test); return NULL; } if (ssb.sb.st_mode & S_IFDIR && !dir) { efree(test); if (error) { spprintf(error, 4096, "phar error: path \"%s\" is a directory", path); } return NULL; } if ((ssb.sb.st_mode & S_IFDIR) == 0 && dir) { efree(test); /* user requested a directory, we must return one */ if (error) { spprintf(error, 4096, "phar error: path \"%s\" exists and is a not a directory", path); } return NULL; } /* mount the file just in time */ if (SUCCESS != phar_mount_entry(phar, test, test_len, path, path_len TSRMLS_CC)) { efree(test); if (error) { spprintf(error, 4096, "phar error: path \"%s\" exists as file \"%s\" and could not be mounted", path, test); } return NULL; } efree(test); if (SUCCESS != zend_hash_find(&phar->manifest, path, path_len, (void**)&entry)) { if (error) { spprintf(error, 4096, "phar error: path \"%s\" exists as file \"%s\" and could not be retrieved after being mounted", path, test); } return NULL; } return entry; } } } return NULL; } /* }}} */ static const char hexChars[] = "0123456789ABCDEF"; static int phar_hex_str(const char *digest, size_t digest_len, char **signature TSRMLS_DC) /* {{{ */ { int pos = -1; size_t len = 0; *signature = (char*)safe_pemalloc(digest_len, 2, 1, PHAR_G(persist)); for (; len < digest_len; ++len) { (*signature)[++pos] = hexChars[((const unsigned char *)digest)[len] >> 4]; (*signature)[++pos] = hexChars[((const unsigned char *)digest)[len] & 0x0F]; } (*signature)[++pos] = '\0'; return pos; } /* }}} */ #ifndef PHAR_HAVE_OPENSSL static int phar_call_openssl_signverify(int is_sign, php_stream *fp, off_t end, char *key, int key_len, char **signature, int *signature_len TSRMLS_DC) /* {{{ */ { zend_fcall_info fci; zend_fcall_info_cache fcc; zval *zdata, *zsig, *zkey, *retval_ptr, **zp[3], *openssl; MAKE_STD_ZVAL(zdata); MAKE_STD_ZVAL(openssl); ZVAL_STRINGL(openssl, is_sign ? "openssl_sign" : "openssl_verify", is_sign ? 
sizeof("openssl_sign")-1 : sizeof("openssl_verify")-1, 1); MAKE_STD_ZVAL(zsig); ZVAL_STRINGL(zsig, *signature, *signature_len, 1); MAKE_STD_ZVAL(zkey); ZVAL_STRINGL(zkey, key, key_len, 1); zp[0] = &zdata; zp[1] = &zsig; zp[2] = &zkey; php_stream_rewind(fp); Z_TYPE_P(zdata) = IS_STRING; Z_STRLEN_P(zdata) = end; if (end != (off_t) php_stream_copy_to_mem(fp, &(Z_STRVAL_P(zdata)), (size_t) end, 0)) { zval_dtor(zdata); zval_dtor(zsig); zval_dtor(zkey); zval_dtor(openssl); efree(openssl); efree(zdata); efree(zkey); efree(zsig); return FAILURE; } if (FAILURE == zend_fcall_info_init(openssl, 0, &fci, &fcc, NULL, NULL TSRMLS_CC)) { zval_dtor(zdata); zval_dtor(zsig); zval_dtor(zkey); zval_dtor(openssl); efree(openssl); efree(zdata); efree(zkey); efree(zsig); return FAILURE; } fci.param_count = 3; fci.params = zp; Z_ADDREF_P(zdata); if (is_sign) { Z_SET_ISREF_P(zsig); } else { Z_ADDREF_P(zsig); } Z_ADDREF_P(zkey); fci.retval_ptr_ptr = &retval_ptr; if (FAILURE == zend_call_function(&fci, &fcc TSRMLS_CC)) { zval_dtor(zdata); zval_dtor(zsig); zval_dtor(zkey); zval_dtor(openssl); efree(openssl); efree(zdata); efree(zkey); efree(zsig); return FAILURE; } zval_dtor(openssl); efree(openssl); Z_DELREF_P(zdata); if (is_sign) { Z_UNSET_ISREF_P(zsig); } else { Z_DELREF_P(zsig); } Z_DELREF_P(zkey); zval_dtor(zdata); efree(zdata); zval_dtor(zkey); efree(zkey); switch (Z_TYPE_P(retval_ptr)) { default: case IS_LONG: zval_dtor(zsig); efree(zsig); if (1 == Z_LVAL_P(retval_ptr)) { efree(retval_ptr); return SUCCESS; } efree(retval_ptr); return FAILURE; case IS_BOOL: efree(retval_ptr); if (Z_BVAL_P(retval_ptr)) { *signature = estrndup(Z_STRVAL_P(zsig), Z_STRLEN_P(zsig)); *signature_len = Z_STRLEN_P(zsig); zval_dtor(zsig); efree(zsig); return SUCCESS; } zval_dtor(zsig); efree(zsig); return FAILURE; } } /* }}} */ #endif /* #ifndef PHAR_HAVE_OPENSSL */ int phar_verify_signature(php_stream *fp, size_t end_of_phar, php_uint32 sig_type, char *sig, int sig_len, char *fname, char **signature, int *signature_len, char **error TSRMLS_DC) /* {{{ */ { int read_size, len; off_t read_len; unsigned char buf[1024]; php_stream_rewind(fp); switch (sig_type) { case PHAR_SIG_OPENSSL: { #ifdef PHAR_HAVE_OPENSSL BIO *in; EVP_PKEY *key; EVP_MD *mdtype = (EVP_MD *) EVP_sha1(); EVP_MD_CTX md_ctx; #else int tempsig; #endif php_uint32 pubkey_len; char *pubkey = NULL, *pfile; php_stream *pfp; #ifndef PHAR_HAVE_OPENSSL if (!zend_hash_exists(&module_registry, "openssl", sizeof("openssl"))) { if (error) { spprintf(error, 0, "openssl not loaded"); } return FAILURE; } #endif /* use __FILE__ . 
'.pubkey' for public key file */ spprintf(&pfile, 0, "%s.pubkey", fname); pfp = php_stream_open_wrapper(pfile, "rb", 0, NULL); efree(pfile); #if PHP_MAJOR_VERSION > 5 if (!pfp || !(pubkey_len = php_stream_copy_to_mem(pfp, (void **) &pubkey, PHP_STREAM_COPY_ALL, 0)) || !pubkey) { #else if (!pfp || !(pubkey_len = php_stream_copy_to_mem(pfp, &pubkey, PHP_STREAM_COPY_ALL, 0)) || !pubkey) { #endif if (pfp) { php_stream_close(pfp); } if (error) { spprintf(error, 0, "openssl public key could not be read"); } return FAILURE; } php_stream_close(pfp); #ifndef PHAR_HAVE_OPENSSL tempsig = sig_len; if (FAILURE == phar_call_openssl_signverify(0, fp, end_of_phar, pubkey, pubkey_len, &sig, &tempsig TSRMLS_CC)) { if (pubkey) { efree(pubkey); } if (error) { spprintf(error, 0, "openssl signature could not be verified"); } return FAILURE; } if (pubkey) { efree(pubkey); } sig_len = tempsig; #else in = BIO_new_mem_buf(pubkey, pubkey_len); if (NULL == in) { efree(pubkey); if (error) { spprintf(error, 0, "openssl signature could not be processed"); } return FAILURE; } key = PEM_read_bio_PUBKEY(in, NULL,NULL, NULL); BIO_free(in); efree(pubkey); if (NULL == key) { if (error) { spprintf(error, 0, "openssl signature could not be processed"); } return FAILURE; } EVP_VerifyInit(&md_ctx, mdtype); read_len = end_of_phar; if (read_len > sizeof(buf)) { read_size = sizeof(buf); } else { read_size = (int)read_len; } php_stream_seek(fp, 0, SEEK_SET); while (read_size && (len = php_stream_read(fp, (char*)buf, read_size)) > 0) { EVP_VerifyUpdate (&md_ctx, buf, len); read_len -= (off_t)len; if (read_len < read_size) { read_size = (int)read_len; } } if (EVP_VerifyFinal(&md_ctx, (unsigned char *)sig, sig_len, key) != 1) { /* 1: signature verified, 0: signature does not match, -1: failed signature operation */ EVP_MD_CTX_cleanup(&md_ctx); if (error) { spprintf(error, 0, "broken openssl signature"); } return FAILURE; } EVP_MD_CTX_cleanup(&md_ctx); #endif *signature_len = phar_hex_str((const char*)sig, sig_len, signature TSRMLS_CC); } break; #ifdef PHAR_HASH_OK case PHAR_SIG_SHA512: { unsigned char digest[64]; PHP_SHA512_CTX context; PHP_SHA512Init(&context); read_len = end_of_phar; if (read_len > sizeof(buf)) { read_size = sizeof(buf); } else { read_size = (int)read_len; } while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) { PHP_SHA512Update(&context, buf, len); read_len -= (off_t)len; if (read_len < read_size) { read_size = (int)read_len; } } PHP_SHA512Final(digest, &context); if (memcmp(digest, sig, sizeof(digest))) { if (error) { spprintf(error, 0, "broken signature"); } return FAILURE; } *signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature TSRMLS_CC); break; } case PHAR_SIG_SHA256: { unsigned char digest[32]; PHP_SHA256_CTX context; PHP_SHA256Init(&context); read_len = end_of_phar; if (read_len > sizeof(buf)) { read_size = sizeof(buf); } else { read_size = (int)read_len; } while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) { PHP_SHA256Update(&context, buf, len); read_len -= (off_t)len; if (read_len < read_size) { read_size = (int)read_len; } } PHP_SHA256Final(digest, &context); if (memcmp(digest, sig, sizeof(digest))) { if (error) { spprintf(error, 0, "broken signature"); } return FAILURE; } *signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature TSRMLS_CC); break; } #else case PHAR_SIG_SHA512: case PHAR_SIG_SHA256: if (error) { spprintf(error, 0, "unsupported signature"); } return FAILURE; #endif case PHAR_SIG_SHA1: { unsigned char digest[20]; PHP_SHA1_CTX 
context; PHP_SHA1Init(&context); read_len = end_of_phar; if (read_len > sizeof(buf)) { read_size = sizeof(buf); } else { read_size = (int)read_len; } while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) { PHP_SHA1Update(&context, buf, len); read_len -= (off_t)len; if (read_len < read_size) { read_size = (int)read_len; } } PHP_SHA1Final(digest, &context); if (memcmp(digest, sig, sizeof(digest))) { if (error) { spprintf(error, 0, "broken signature"); } return FAILURE; } *signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature TSRMLS_CC); break; } case PHAR_SIG_MD5: { unsigned char digest[16]; PHP_MD5_CTX context; PHP_MD5Init(&context); read_len = end_of_phar; if (read_len > sizeof(buf)) { read_size = sizeof(buf); } else { read_size = (int)read_len; } while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) { PHP_MD5Update(&context, buf, len); read_len -= (off_t)len; if (read_len < read_size) { read_size = (int)read_len; } } PHP_MD5Final(digest, &context); if (memcmp(digest, sig, sizeof(digest))) { if (error) { spprintf(error, 0, "broken signature"); } return FAILURE; } *signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature TSRMLS_CC); break; } default: if (error) { spprintf(error, 0, "broken or unsupported signature"); } return FAILURE; } return SUCCESS; } /* }}} */ Commit Message: CWE ID:
int phar_get_entry_data(phar_entry_data **ret, char *fname, int fname_len, char *path, int path_len, char *mode, char allow_dir, char **error, int security TSRMLS_DC) /* {{{ */ { phar_archive_data *phar; phar_entry_info *entry; int for_write = mode[0] != 'r' || mode[1] == '+'; int for_append = mode[0] == 'a'; int for_create = mode[0] != 'r'; int for_trunc = mode[0] == 'w'; if (!ret) { return FAILURE; } *ret = NULL; if (error) { *error = NULL; } if (FAILURE == phar_get_archive(&phar, fname, fname_len, NULL, 0, error TSRMLS_CC)) { return FAILURE; } if (for_write && PHAR_G(readonly) && !phar->is_data) { if (error) { spprintf(error, 4096, "phar error: file \"%s\" in phar \"%s\" cannot be opened for writing, disabled by ini setting", path, fname); } return FAILURE; } if (!path_len) { if (error) { spprintf(error, 4096, "phar error: file \"\" in phar \"%s\" cannot be empty", fname); } return FAILURE; } really_get_entry: if (allow_dir) { if ((entry = phar_get_entry_info_dir(phar, path, path_len, allow_dir, for_create && !PHAR_G(readonly) && !phar->is_data ? NULL : error, security TSRMLS_CC)) == NULL) { if (for_create && (!PHAR_G(readonly) || phar->is_data)) { return SUCCESS; } return FAILURE; } } else { if ((entry = phar_get_entry_info(phar, path, path_len, for_create && !PHAR_G(readonly) && !phar->is_data ? NULL : error, security TSRMLS_CC)) == NULL) { if (for_create && (!PHAR_G(readonly) || phar->is_data)) { return SUCCESS; } return FAILURE; } } if (for_write && phar->is_persistent) { if (FAILURE == phar_copy_on_write(&phar TSRMLS_CC)) { if (error) { spprintf(error, 4096, "phar error: file \"%s\" in phar \"%s\" cannot be opened for writing, could not make cached phar writeable", path, fname); } return FAILURE; } else { goto really_get_entry; } } if (entry->is_modified && !for_write) { if (error) { spprintf(error, 4096, "phar error: file \"%s\" in phar \"%s\" cannot be opened for reading, writable file pointers are open", path, fname); } return FAILURE; } if (entry->fp_refcount && for_write) { if (error) { spprintf(error, 4096, "phar error: file \"%s\" in phar \"%s\" cannot be opened for writing, readable file pointers are open", path, fname); } return FAILURE; } if (entry->is_deleted) { if (!for_create) { return FAILURE; } entry->is_deleted = 0; } if (entry->is_dir) { *ret = (phar_entry_data *) emalloc(sizeof(phar_entry_data)); (*ret)->position = 0; (*ret)->fp = NULL; (*ret)->phar = phar; (*ret)->for_write = for_write; (*ret)->internal_file = entry; (*ret)->is_zip = entry->is_zip; (*ret)->is_tar = entry->is_tar; if (!phar->is_persistent) { ++(entry->phar->refcount); ++(entry->fp_refcount); } return SUCCESS; } if (entry->fp_type == PHAR_MOD) { if (for_trunc) { if (FAILURE == phar_create_writeable_entry(phar, entry, error TSRMLS_CC)) { return FAILURE; } } else if (for_append) { phar_seek_efp(entry, 0, SEEK_END, 0, 0 TSRMLS_CC); } } else { if (for_write) { if (entry->link) { efree(entry->link); entry->link = NULL; entry->tar_type = (entry->is_tar ? 
TAR_FILE : '\0'); } if (for_trunc) { if (FAILURE == phar_create_writeable_entry(phar, entry, error TSRMLS_CC)) { return FAILURE; } } else { if (FAILURE == phar_separate_entry_fp(entry, error TSRMLS_CC)) { return FAILURE; } } } else { if (FAILURE == phar_open_entry_fp(entry, error, 1 TSRMLS_CC)) { return FAILURE; } } } *ret = (phar_entry_data *) emalloc(sizeof(phar_entry_data)); (*ret)->position = 0; (*ret)->phar = phar; (*ret)->for_write = for_write; (*ret)->internal_file = entry; (*ret)->is_zip = entry->is_zip; (*ret)->is_tar = entry->is_tar; (*ret)->fp = phar_get_efp(entry, 1 TSRMLS_CC); if (entry->link) { phar_entry_info *link = phar_get_link_source(entry TSRMLS_CC); if(!link) { return FAILURE; } (*ret)->zero = phar_get_fp_offset(link TSRMLS_CC); } else { (*ret)->zero = phar_get_fp_offset(entry TSRMLS_CC); } } return SUCCESS; } /* }}} */ /** * Create a new dummy file slot within a writeable phar for a newly created file */ phar_entry_data *phar_get_or_create_entry_data(char *fname, int fname_len, char *path, int path_len, char *mode, char allow_dir, char **error, int security TSRMLS_DC) /* {{{ */ { phar_archive_data *phar; phar_entry_info *entry, etemp; phar_entry_data *ret; const char *pcr_error; char is_dir; #ifdef PHP_WIN32 phar_unixify_path_separators(path, path_len); #endif is_dir = (path_len && path[path_len - 1] == '/') ? 1 : 0; if (FAILURE == phar_get_archive(&phar, fname, fname_len, NULL, 0, error TSRMLS_CC)) { return NULL; } if (FAILURE == phar_get_entry_data(&ret, fname, fname_len, path, path_len, mode, allow_dir, error, security TSRMLS_CC)) { return NULL; } else if (ret) { return ret; } if (phar_path_check(&path, &path_len, &pcr_error) > pcr_is_ok) { if (error) { spprintf(error, 0, "phar error: invalid path \"%s\" contains %s", path, pcr_error); } return NULL; } if (phar->is_persistent && FAILURE == phar_copy_on_write(&phar TSRMLS_CC)) { if (error) { spprintf(error, 4096, "phar error: file \"%s\" in phar \"%s\" cannot be created, could not make cached phar writeable", path, fname); } return NULL; } /* create a new phar data holder */ ret = (phar_entry_data *) emalloc(sizeof(phar_entry_data)); /* create an entry, this is a new file */ memset(&etemp, 0, sizeof(phar_entry_info)); etemp.filename_len = path_len; etemp.fp_type = PHAR_MOD; etemp.fp = php_stream_fopen_tmpfile(); if (!etemp.fp) { if (error) { spprintf(error, 0, "phar error: unable to create temporary file"); } efree(ret); return NULL; } etemp.fp_refcount = 1; if (allow_dir == 2) { etemp.is_dir = 1; etemp.flags = etemp.old_flags = PHAR_ENT_PERM_DEF_DIR; } else { etemp.flags = etemp.old_flags = PHAR_ENT_PERM_DEF_FILE; } if (is_dir) { etemp.filename_len--; /* strip trailing / */ path_len--; } phar_add_virtual_dirs(phar, path, path_len TSRMLS_CC); etemp.is_modified = 1; etemp.timestamp = time(0); etemp.is_crc_checked = 1; etemp.phar = phar; etemp.filename = estrndup(path, path_len); etemp.is_zip = phar->is_zip; if (phar->is_tar) { etemp.is_tar = phar->is_tar; etemp.tar_type = etemp.is_dir ? 
TAR_DIR : TAR_FILE; } if (FAILURE == zend_hash_add(&phar->manifest, etemp.filename, path_len, (void*)&etemp, sizeof(phar_entry_info), (void **) &entry)) { php_stream_close(etemp.fp); if (error) { spprintf(error, 0, "phar error: unable to add new entry \"%s\" to phar \"%s\"", etemp.filename, phar->fname); } efree(ret); efree(etemp.filename); return NULL; } if (!entry) { php_stream_close(etemp.fp); efree(etemp.filename); efree(ret); return NULL; } ++(phar->refcount); ret->phar = phar; ret->fp = entry->fp; ret->position = ret->zero = 0; ret->for_write = 1; ret->is_zip = entry->is_zip; ret->is_tar = entry->is_tar; ret->internal_file = entry; return ret; } /* }}} */ /* initialize a phar_archive_data's read-only fp for existing phar data */ int phar_open_archive_fp(phar_archive_data *phar TSRMLS_DC) /* {{{ */ { if (phar_get_pharfp(phar TSRMLS_CC)) { return SUCCESS; } if (php_check_open_basedir(phar->fname TSRMLS_CC)) { return FAILURE; } phar_set_pharfp(phar, php_stream_open_wrapper(phar->fname, "rb", IGNORE_URL|STREAM_MUST_SEEK|0, NULL) TSRMLS_CC); if (!phar_get_pharfp(phar TSRMLS_CC)) { return FAILURE; } return SUCCESS; } /* }}} */ /* copy file data from an existing to a new phar_entry_info that is not in the manifest */ int phar_copy_entry_fp(phar_entry_info *source, phar_entry_info *dest, char **error TSRMLS_DC) /* {{{ */ { phar_entry_info *link; if (FAILURE == phar_open_entry_fp(source, error, 1 TSRMLS_CC)) { return FAILURE; } if (dest->link) { efree(dest->link); dest->link = NULL; dest->tar_type = (dest->is_tar ? TAR_FILE : '\0'); } dest->fp_type = PHAR_MOD; dest->offset = 0; dest->is_modified = 1; dest->fp = php_stream_fopen_tmpfile(); if (dest->fp == NULL) { spprintf(error, 0, "phar error: unable to create temporary file"); return EOF; } phar_seek_efp(source, 0, SEEK_SET, 0, 1 TSRMLS_CC); link = phar_get_link_source(source TSRMLS_CC); if (!link) { link = source; } if (SUCCESS != phar_stream_copy_to_stream(phar_get_efp(link, 0 TSRMLS_CC), dest->fp, link->uncompressed_filesize, NULL)) { php_stream_close(dest->fp); dest->fp_type = PHAR_FP; if (error) { spprintf(error, 4096, "phar error: unable to copy contents of file \"%s\" to \"%s\" in phar archive \"%s\"", source->filename, dest->filename, source->phar->fname); } return FAILURE; } return SUCCESS; } /* }}} */ /* open and decompress a compressed phar entry */ int phar_open_entry_fp(phar_entry_info *entry, char **error, int follow_links TSRMLS_DC) /* {{{ */ { php_stream_filter *filter; phar_archive_data *phar = entry->phar; char *filtername; off_t loc; php_stream *ufp; phar_entry_data dummy; if (follow_links && entry->link) { phar_entry_info *link_entry = phar_get_link_source(entry TSRMLS_CC); if (link_entry && link_entry != entry) { return phar_open_entry_fp(link_entry, error, 1 TSRMLS_CC); } } if (entry->is_modified) { return SUCCESS; } if (entry->fp_type == PHAR_TMP) { if (!entry->fp) { entry->fp = php_stream_open_wrapper(entry->tmp, "rb", STREAM_MUST_SEEK|0, NULL); } return SUCCESS; } if (entry->fp_type != PHAR_FP) { /* either newly created or already modified */ return SUCCESS; } if (!phar_get_pharfp(phar TSRMLS_CC)) { if (FAILURE == phar_open_archive_fp(phar TSRMLS_CC)) { spprintf(error, 4096, "phar error: Cannot open phar archive \"%s\" for reading", phar->fname); return FAILURE; } } if ((entry->old_flags && !(entry->old_flags & PHAR_ENT_COMPRESSION_MASK)) || !(entry->flags & PHAR_ENT_COMPRESSION_MASK)) { dummy.internal_file = entry; dummy.phar = phar; dummy.zero = entry->offset; dummy.fp = phar_get_pharfp(phar TSRMLS_CC); if (FAILURE 
== phar_postprocess_file(&dummy, entry->crc32, error, 1 TSRMLS_CC)) { return FAILURE; } return SUCCESS; } if (!phar_get_entrypufp(entry TSRMLS_CC)) { phar_set_entrypufp(entry, php_stream_fopen_tmpfile() TSRMLS_CC); if (!phar_get_entrypufp(entry TSRMLS_CC)) { spprintf(error, 4096, "phar error: Cannot open temporary file for decompressing phar archive \"%s\" file \"%s\"", phar->fname, entry->filename); return FAILURE; } } dummy.internal_file = entry; dummy.phar = phar; dummy.zero = entry->offset; dummy.fp = phar_get_pharfp(phar TSRMLS_CC); if (FAILURE == phar_postprocess_file(&dummy, entry->crc32, error, 1 TSRMLS_CC)) { return FAILURE; } ufp = phar_get_entrypufp(entry TSRMLS_CC); if ((filtername = phar_decompress_filter(entry, 0)) != NULL) { filter = php_stream_filter_create(filtername, NULL, 0 TSRMLS_CC); } else { filter = NULL; } if (!filter) { spprintf(error, 4096, "phar error: unable to read phar \"%s\" (cannot create %s filter while decompressing file \"%s\")", phar->fname, phar_decompress_filter(entry, 1), entry->filename); return FAILURE; } /* now we can safely use proper decompression */ /* save the new offset location within ufp */ php_stream_seek(ufp, 0, SEEK_END); loc = php_stream_tell(ufp); php_stream_filter_append(&ufp->writefilters, filter); php_stream_seek(phar_get_entrypfp(entry TSRMLS_CC), phar_get_fp_offset(entry TSRMLS_CC), SEEK_SET); if (entry->uncompressed_filesize) { if (SUCCESS != phar_stream_copy_to_stream(phar_get_entrypfp(entry TSRMLS_CC), ufp, entry->compressed_filesize, NULL)) { spprintf(error, 4096, "phar error: internal corruption of phar \"%s\" (actual filesize mismatch on file \"%s\")", phar->fname, entry->filename); php_stream_filter_remove(filter, 1 TSRMLS_CC); return FAILURE; } } php_stream_filter_flush(filter, 1); php_stream_flush(ufp); php_stream_filter_remove(filter, 1 TSRMLS_CC); if (php_stream_tell(ufp) - loc != (off_t) entry->uncompressed_filesize) { spprintf(error, 4096, "phar error: internal corruption of phar \"%s\" (actual filesize mismatch on file \"%s\")", phar->fname, entry->filename); return FAILURE; } entry->old_flags = entry->flags; /* this is now the new location of the file contents within this fp */ phar_set_fp_type(entry, PHAR_UFP, loc TSRMLS_CC); dummy.zero = entry->offset; dummy.fp = ufp; if (FAILURE == phar_postprocess_file(&dummy, entry->crc32, error, 0 TSRMLS_CC)) { return FAILURE; } return SUCCESS; } /* }}} */ int phar_create_writeable_entry(phar_archive_data *phar, phar_entry_info *entry, char **error TSRMLS_DC) /* {{{ */ { if (entry->fp_type == PHAR_MOD) { /* already newly created, truncate */ php_stream_truncate_set_size(entry->fp, 0); entry->old_flags = entry->flags; entry->is_modified = 1; phar->is_modified = 1; /* reset file size */ entry->uncompressed_filesize = 0; entry->compressed_filesize = 0; entry->crc32 = 0; entry->flags = PHAR_ENT_PERM_DEF_FILE; entry->fp_type = PHAR_MOD; entry->offset = 0; return SUCCESS; } if (error) { *error = NULL; } /* open a new temp file for writing */ if (entry->link) { efree(entry->link); entry->link = NULL; entry->tar_type = (entry->is_tar ? 
TAR_FILE : '\0'); } entry->fp = php_stream_fopen_tmpfile(); if (!entry->fp) { if (error) { spprintf(error, 0, "phar error: unable to create temporary file"); } return FAILURE; } entry->old_flags = entry->flags; entry->is_modified = 1; phar->is_modified = 1; /* reset file size */ entry->uncompressed_filesize = 0; entry->compressed_filesize = 0; entry->crc32 = 0; entry->flags = PHAR_ENT_PERM_DEF_FILE; entry->fp_type = PHAR_MOD; entry->offset = 0; return SUCCESS; } /* }}} */ int phar_separate_entry_fp(phar_entry_info *entry, char **error TSRMLS_DC) /* {{{ */ { php_stream *fp; phar_entry_info *link; if (FAILURE == phar_open_entry_fp(entry, error, 1 TSRMLS_CC)) { return FAILURE; } if (entry->fp_type == PHAR_MOD) { return SUCCESS; } fp = php_stream_fopen_tmpfile(); if (fp == NULL) { spprintf(error, 0, "phar error: unable to create temporary file"); return FAILURE; } phar_seek_efp(entry, 0, SEEK_SET, 0, 1 TSRMLS_CC); link = phar_get_link_source(entry TSRMLS_CC); if (!link) { link = entry; } if (SUCCESS != phar_stream_copy_to_stream(phar_get_efp(link, 0 TSRMLS_CC), fp, link->uncompressed_filesize, NULL)) { if (error) { spprintf(error, 4096, "phar error: cannot separate entry file \"%s\" contents in phar archive \"%s\" for write access", entry->filename, entry->phar->fname); } return FAILURE; } if (entry->link) { efree(entry->link); entry->link = NULL; entry->tar_type = (entry->is_tar ? TAR_FILE : '\0'); } entry->offset = 0; entry->fp = fp; entry->fp_type = PHAR_MOD; entry->is_modified = 1; return SUCCESS; } /* }}} */ /** * helper function to open an internal file's fp just-in-time */ phar_entry_info * phar_open_jit(phar_archive_data *phar, phar_entry_info *entry, char **error TSRMLS_DC) /* {{{ */ { if (error) { *error = NULL; } /* seek to start of internal file and read it */ if (FAILURE == phar_open_entry_fp(entry, error, 1 TSRMLS_CC)) { return NULL; } if (-1 == phar_seek_efp(entry, 0, SEEK_SET, 0, 1 TSRMLS_CC)) { spprintf(error, 4096, "phar error: cannot seek to start of file \"%s\" in phar \"%s\"", entry->filename, phar->fname); return NULL; } return entry; } /* }}} */ PHP_PHAR_API int phar_resolve_alias(char *alias, int alias_len, char **filename, int *filename_len TSRMLS_DC) /* {{{ */ { phar_archive_data **fd_ptr; if (PHAR_GLOBALS->phar_alias_map.arBuckets && SUCCESS == zend_hash_find(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, (void**)&fd_ptr)) { *filename = (*fd_ptr)->fname; *filename_len = (*fd_ptr)->fname_len; return SUCCESS; } return FAILURE; } /* }}} */ int phar_free_alias(phar_archive_data *phar, char *alias, int alias_len TSRMLS_DC) /* {{{ */ { if (phar->refcount || phar->is_persistent) { return FAILURE; } /* this archive has no open references, so emit an E_STRICT and remove it */ if (zend_hash_del(&(PHAR_GLOBALS->phar_fname_map), phar->fname, phar->fname_len) != SUCCESS) { return FAILURE; } /* invalidate phar cache */ PHAR_G(last_phar) = NULL; PHAR_G(last_phar_name) = PHAR_G(last_alias) = NULL; return SUCCESS; } /* }}} */ /** * Looks up a phar archive in the filename map, connecting it to the alias * (if any) or returns null */ int phar_get_archive(phar_archive_data **archive, char *fname, int fname_len, char *alias, int alias_len, char **error TSRMLS_DC) /* {{{ */ { phar_archive_data *fd, **fd_ptr; char *my_realpath, *save; int save_len; ulong fhash, ahash = 0; phar_request_initialize(TSRMLS_C); if (error) { *error = NULL; } *archive = NULL; if (PHAR_G(last_phar) && fname_len == PHAR_G(last_phar_name_len) && !memcmp(fname, PHAR_G(last_phar_name), fname_len)) { *archive = 
PHAR_G(last_phar); if (alias && alias_len) { if (!PHAR_G(last_phar)->is_temporary_alias && (alias_len != PHAR_G(last_phar)->alias_len || memcmp(PHAR_G(last_phar)->alias, alias, alias_len))) { if (error) { spprintf(error, 0, "alias \"%s\" is already used for archive \"%s\" cannot be overloaded with \"%s\"", alias, PHAR_G(last_phar)->fname, fname); } *archive = NULL; return FAILURE; } if (PHAR_G(last_phar)->alias_len && SUCCESS == zend_hash_find(&(PHAR_GLOBALS->phar_alias_map), PHAR_G(last_phar)->alias, PHAR_G(last_phar)->alias_len, (void**)&fd_ptr)) { zend_hash_del(&(PHAR_GLOBALS->phar_alias_map), PHAR_G(last_phar)->alias, PHAR_G(last_phar)->alias_len); } zend_hash_add(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, (void*)&(*archive), sizeof(phar_archive_data*), NULL); PHAR_G(last_alias) = alias; PHAR_G(last_alias_len) = alias_len; } return SUCCESS; } if (alias && alias_len && PHAR_G(last_phar) && alias_len == PHAR_G(last_alias_len) && !memcmp(alias, PHAR_G(last_alias), alias_len)) { fd = PHAR_G(last_phar); fd_ptr = &fd; goto alias_success; } if (alias && alias_len) { ahash = zend_inline_hash_func(alias, alias_len); if (SUCCESS == zend_hash_quick_find(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, ahash, (void**)&fd_ptr)) { alias_success: if (fname && (fname_len != (*fd_ptr)->fname_len || strncmp(fname, (*fd_ptr)->fname, fname_len))) { if (error) { spprintf(error, 0, "alias \"%s\" is already used for archive \"%s\" cannot be overloaded with \"%s\"", alias, (*fd_ptr)->fname, fname); } if (SUCCESS == phar_free_alias(*fd_ptr, alias, alias_len TSRMLS_CC)) { if (error) { efree(*error); *error = NULL; } } return FAILURE; } *archive = *fd_ptr; fd = *fd_ptr; PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = alias; PHAR_G(last_alias_len) = alias_len; return SUCCESS; } if (PHAR_G(manifest_cached) && SUCCESS == zend_hash_quick_find(&cached_alias, alias, alias_len, ahash, (void **)&fd_ptr)) { goto alias_success; } } fhash = zend_inline_hash_func(fname, fname_len); my_realpath = NULL; save = fname; save_len = fname_len; if (fname && fname_len) { if (SUCCESS == zend_hash_quick_find(&(PHAR_GLOBALS->phar_fname_map), fname, fname_len, fhash, (void**)&fd_ptr)) { *archive = *fd_ptr; fd = *fd_ptr; if (alias && alias_len) { if (!fd->is_temporary_alias && (alias_len != fd->alias_len || memcmp(fd->alias, alias, alias_len))) { if (error) { spprintf(error, 0, "alias \"%s\" is already used for archive \"%s\" cannot be overloaded with \"%s\"", alias, (*fd_ptr)->fname, fname); } return FAILURE; } if (fd->alias_len && SUCCESS == zend_hash_find(&(PHAR_GLOBALS->phar_alias_map), fd->alias, fd->alias_len, (void**)&fd_ptr)) { zend_hash_del(&(PHAR_GLOBALS->phar_alias_map), fd->alias, fd->alias_len); } zend_hash_quick_add(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, ahash, (void*)&fd, sizeof(phar_archive_data*), NULL); } PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = fd->alias; PHAR_G(last_alias_len) = fd->alias_len; return SUCCESS; } if (PHAR_G(manifest_cached) && SUCCESS == zend_hash_quick_find(&cached_phars, fname, fname_len, fhash, (void**)&fd_ptr)) { *archive = *fd_ptr; fd = *fd_ptr; /* this could be problematic - alias should never be different from manifest alias for cached phars */ if (!fd->is_temporary_alias && alias && alias_len) { if (alias_len != fd->alias_len || memcmp(fd->alias, alias, alias_len)) { if (error) { spprintf(error, 0, "alias \"%s\" is 
already used for archive \"%s\" cannot be overloaded with \"%s\"", alias, (*fd_ptr)->fname, fname); } return FAILURE; } } PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = fd->alias; PHAR_G(last_alias_len) = fd->alias_len; return SUCCESS; } if (SUCCESS == zend_hash_quick_find(&(PHAR_GLOBALS->phar_alias_map), save, save_len, fhash, (void**)&fd_ptr)) { fd = *archive = *fd_ptr; PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = fd->alias; PHAR_G(last_alias_len) = fd->alias_len; return SUCCESS; } if (PHAR_G(manifest_cached) && SUCCESS == zend_hash_quick_find(&cached_alias, save, save_len, fhash, (void**)&fd_ptr)) { fd = *archive = *fd_ptr; PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = fd->alias; PHAR_G(last_alias_len) = fd->alias_len; return SUCCESS; } /* not found, try converting \ to / */ my_realpath = expand_filepath(fname, my_realpath TSRMLS_CC); if (my_realpath) { fname_len = strlen(my_realpath); fname = my_realpath; } else { return FAILURE; } #ifdef PHP_WIN32 phar_unixify_path_separators(fname, fname_len); #endif fhash = zend_inline_hash_func(fname, fname_len); if (SUCCESS == zend_hash_quick_find(&(PHAR_GLOBALS->phar_fname_map), fname, fname_len, fhash, (void**)&fd_ptr)) { realpath_success: *archive = *fd_ptr; fd = *fd_ptr; if (alias && alias_len) { zend_hash_quick_add(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, ahash, (void*)&fd, sizeof(phar_archive_data*), NULL); } efree(my_realpath); PHAR_G(last_phar) = fd; PHAR_G(last_phar_name) = fd->fname; PHAR_G(last_phar_name_len) = fd->fname_len; PHAR_G(last_alias) = fd->alias; PHAR_G(last_alias_len) = fd->alias_len; return SUCCESS; } if (PHAR_G(manifest_cached) && SUCCESS == zend_hash_quick_find(&cached_phars, fname, fname_len, fhash, (void**)&fd_ptr)) { goto realpath_success; } efree(my_realpath); } return FAILURE; } /* }}} */ /** * Determine which stream compression filter (if any) we need to read this file */ char * phar_compress_filter(phar_entry_info * entry, int return_unknown) /* {{{ */ { switch (entry->flags & PHAR_ENT_COMPRESSION_MASK) { case PHAR_ENT_COMPRESSED_GZ: return "zlib.deflate"; case PHAR_ENT_COMPRESSED_BZ2: return "bzip2.compress"; default: return return_unknown ? "unknown" : NULL; } } /* }}} */ /** * Determine which stream decompression filter (if any) we need to read this file */ char * phar_decompress_filter(phar_entry_info * entry, int return_unknown) /* {{{ */ { php_uint32 flags; if (entry->is_modified) { flags = entry->old_flags; } else { flags = entry->flags; } switch (flags & PHAR_ENT_COMPRESSION_MASK) { case PHAR_ENT_COMPRESSED_GZ: return "zlib.inflate"; case PHAR_ENT_COMPRESSED_BZ2: return "bzip2.decompress"; default: return return_unknown ? 
"unknown" : NULL; } } /* }}} */ /** * retrieve information on a file contained within a phar, or null if it ain't there */ phar_entry_info *phar_get_entry_info(phar_archive_data *phar, char *path, int path_len, char **error, int security TSRMLS_DC) /* {{{ */ { return phar_get_entry_info_dir(phar, path, path_len, 0, error, security TSRMLS_CC); } /* }}} */ /** * retrieve information on a file or directory contained within a phar, or null if none found * allow_dir is 0 for none, 1 for both empty directories in the phar and temp directories, and 2 for only * valid pre-existing empty directory entries */ phar_entry_info *phar_get_entry_info_dir(phar_archive_data *phar, char *path, int path_len, char dir, char **error, int security TSRMLS_DC) /* {{{ */ { const char *pcr_error; phar_entry_info *entry; int is_dir; #ifdef PHP_WIN32 phar_unixify_path_separators(path, path_len); #endif is_dir = (path_len && (path[path_len - 1] == '/')) ? 1 : 0; if (error) { *error = NULL; } if (security && path_len >= sizeof(".phar")-1 && !memcmp(path, ".phar", sizeof(".phar")-1)) { if (error) { spprintf(error, 4096, "phar error: cannot directly access magic \".phar\" directory or files within it"); } return NULL; } if (!path_len && !dir) { if (error) { spprintf(error, 4096, "phar error: invalid path \"%s\" must not be empty", path); } return NULL; } if (phar_path_check(&path, &path_len, &pcr_error) > pcr_is_ok) { if (error) { spprintf(error, 4096, "phar error: invalid path \"%s\" contains %s", path, pcr_error); } return NULL; } if (!phar->manifest.arBuckets) { return NULL; } if (is_dir) { if (!path_len || path_len == 1) { return NULL; } path_len--; } if (SUCCESS == zend_hash_find(&phar->manifest, path, path_len, (void**)&entry)) { if (entry->is_deleted) { /* entry is deleted, but has not been flushed to disk yet */ return NULL; } if (entry->is_dir && !dir) { if (error) { spprintf(error, 4096, "phar error: path \"%s\" is a directory", path); } return NULL; } if (!entry->is_dir && dir == 2) { /* user requested a directory, we must return one */ if (error) { spprintf(error, 4096, "phar error: path \"%s\" exists and is a not a directory", path); } return NULL; } return entry; } if (dir) { if (zend_hash_exists(&phar->virtual_dirs, path, path_len)) { /* a file or directory exists in a sub-directory of this path */ entry = (phar_entry_info *) ecalloc(1, sizeof(phar_entry_info)); /* this next line tells PharFileInfo->__destruct() to efree the filename */ entry->is_temp_dir = entry->is_dir = 1; entry->filename = (char *) estrndup(path, path_len + 1); entry->filename_len = path_len; entry->phar = phar; return entry; } } if (phar->mounted_dirs.arBuckets && zend_hash_num_elements(&phar->mounted_dirs)) { phar_zstr key; char *str_key; ulong unused; uint keylen; zend_hash_internal_pointer_reset(&phar->mounted_dirs); while (FAILURE != zend_hash_has_more_elements(&phar->mounted_dirs)) { if (HASH_KEY_NON_EXISTENT == zend_hash_get_current_key_ex(&phar->mounted_dirs, &key, &keylen, &unused, 0, NULL)) { break; } PHAR_STR(key, str_key); if ((int)keylen >= path_len || strncmp(str_key, path, keylen)) { PHAR_STR_FREE(str_key); continue; } else { char *test; int test_len; php_stream_statbuf ssb; if (SUCCESS != zend_hash_find(&phar->manifest, str_key, keylen, (void **) &entry)) { if (error) { spprintf(error, 4096, "phar internal error: mounted path \"%s\" could not be retrieved from manifest", str_key); } PHAR_STR_FREE(str_key); return NULL; } if (!entry->tmp || !entry->is_mounted) { if (error) { spprintf(error, 4096, "phar internal error: 
mounted path \"%s\" is not properly initialized as a mounted path", str_key); } PHAR_STR_FREE(str_key); return NULL; } PHAR_STR_FREE(str_key); test_len = spprintf(&test, MAXPATHLEN, "%s%s", entry->tmp, path + keylen); if (SUCCESS != php_stream_stat_path(test, &ssb)) { efree(test); return NULL; } if (ssb.sb.st_mode & S_IFDIR && !dir) { efree(test); if (error) { spprintf(error, 4096, "phar error: path \"%s\" is a directory", path); } return NULL; } if ((ssb.sb.st_mode & S_IFDIR) == 0 && dir) { efree(test); /* user requested a directory, we must return one */ if (error) { spprintf(error, 4096, "phar error: path \"%s\" exists and is a not a directory", path); } return NULL; } /* mount the file just in time */ if (SUCCESS != phar_mount_entry(phar, test, test_len, path, path_len TSRMLS_CC)) { efree(test); if (error) { spprintf(error, 4096, "phar error: path \"%s\" exists as file \"%s\" and could not be mounted", path, test); } return NULL; } efree(test); if (SUCCESS != zend_hash_find(&phar->manifest, path, path_len, (void**)&entry)) { if (error) { spprintf(error, 4096, "phar error: path \"%s\" exists as file \"%s\" and could not be retrieved after being mounted", path, test); } return NULL; } return entry; } } } return NULL; } /* }}} */ static const char hexChars[] = "0123456789ABCDEF"; static int phar_hex_str(const char *digest, size_t digest_len, char **signature TSRMLS_DC) /* {{{ */ { int pos = -1; size_t len = 0; *signature = (char*)safe_pemalloc(digest_len, 2, 1, PHAR_G(persist)); for (; len < digest_len; ++len) { (*signature)[++pos] = hexChars[((const unsigned char *)digest)[len] >> 4]; (*signature)[++pos] = hexChars[((const unsigned char *)digest)[len] & 0x0F]; } (*signature)[++pos] = '\0'; return pos; } /* }}} */ #ifndef PHAR_HAVE_OPENSSL static int phar_call_openssl_signverify(int is_sign, php_stream *fp, off_t end, char *key, int key_len, char **signature, int *signature_len TSRMLS_DC) /* {{{ */ { zend_fcall_info fci; zend_fcall_info_cache fcc; zval *zdata, *zsig, *zkey, *retval_ptr, **zp[3], *openssl; MAKE_STD_ZVAL(zdata); MAKE_STD_ZVAL(openssl); ZVAL_STRINGL(openssl, is_sign ? "openssl_sign" : "openssl_verify", is_sign ? 
sizeof("openssl_sign")-1 : sizeof("openssl_verify")-1, 1); MAKE_STD_ZVAL(zsig); ZVAL_STRINGL(zsig, *signature, *signature_len, 1); MAKE_STD_ZVAL(zkey); ZVAL_STRINGL(zkey, key, key_len, 1); zp[0] = &zdata; zp[1] = &zsig; zp[2] = &zkey; php_stream_rewind(fp); Z_TYPE_P(zdata) = IS_STRING; Z_STRLEN_P(zdata) = end; if (end != (off_t) php_stream_copy_to_mem(fp, &(Z_STRVAL_P(zdata)), (size_t) end, 0)) { zval_dtor(zdata); zval_dtor(zsig); zval_dtor(zkey); zval_dtor(openssl); efree(openssl); efree(zdata); efree(zkey); efree(zsig); return FAILURE; } if (FAILURE == zend_fcall_info_init(openssl, 0, &fci, &fcc, NULL, NULL TSRMLS_CC)) { zval_dtor(zdata); zval_dtor(zsig); zval_dtor(zkey); zval_dtor(openssl); efree(openssl); efree(zdata); efree(zkey); efree(zsig); return FAILURE; } fci.param_count = 3; fci.params = zp; Z_ADDREF_P(zdata); if (is_sign) { Z_SET_ISREF_P(zsig); } else { Z_ADDREF_P(zsig); } Z_ADDREF_P(zkey); fci.retval_ptr_ptr = &retval_ptr; if (FAILURE == zend_call_function(&fci, &fcc TSRMLS_CC)) { zval_dtor(zdata); zval_dtor(zsig); zval_dtor(zkey); zval_dtor(openssl); efree(openssl); efree(zdata); efree(zkey); efree(zsig); return FAILURE; } zval_dtor(openssl); efree(openssl); Z_DELREF_P(zdata); if (is_sign) { Z_UNSET_ISREF_P(zsig); } else { Z_DELREF_P(zsig); } Z_DELREF_P(zkey); zval_dtor(zdata); efree(zdata); zval_dtor(zkey); efree(zkey); switch (Z_TYPE_P(retval_ptr)) { default: case IS_LONG: zval_dtor(zsig); efree(zsig); if (1 == Z_LVAL_P(retval_ptr)) { efree(retval_ptr); return SUCCESS; } efree(retval_ptr); return FAILURE; case IS_BOOL: efree(retval_ptr); if (Z_BVAL_P(retval_ptr)) { *signature = estrndup(Z_STRVAL_P(zsig), Z_STRLEN_P(zsig)); *signature_len = Z_STRLEN_P(zsig); zval_dtor(zsig); efree(zsig); return SUCCESS; } zval_dtor(zsig); efree(zsig); return FAILURE; } } /* }}} */ #endif /* #ifndef PHAR_HAVE_OPENSSL */ int phar_verify_signature(php_stream *fp, size_t end_of_phar, php_uint32 sig_type, char *sig, int sig_len, char *fname, char **signature, int *signature_len, char **error TSRMLS_DC) /* {{{ */ { int read_size, len; off_t read_len; unsigned char buf[1024]; php_stream_rewind(fp); switch (sig_type) { case PHAR_SIG_OPENSSL: { #ifdef PHAR_HAVE_OPENSSL BIO *in; EVP_PKEY *key; EVP_MD *mdtype = (EVP_MD *) EVP_sha1(); EVP_MD_CTX md_ctx; #else int tempsig; #endif php_uint32 pubkey_len; char *pubkey = NULL, *pfile; php_stream *pfp; #ifndef PHAR_HAVE_OPENSSL if (!zend_hash_exists(&module_registry, "openssl", sizeof("openssl"))) { if (error) { spprintf(error, 0, "openssl not loaded"); } return FAILURE; } #endif /* use __FILE__ . 
'.pubkey' for public key file */ spprintf(&pfile, 0, "%s.pubkey", fname); pfp = php_stream_open_wrapper(pfile, "rb", 0, NULL); efree(pfile); #if PHP_MAJOR_VERSION > 5 if (!pfp || !(pubkey_len = php_stream_copy_to_mem(pfp, (void **) &pubkey, PHP_STREAM_COPY_ALL, 0)) || !pubkey) { #else if (!pfp || !(pubkey_len = php_stream_copy_to_mem(pfp, &pubkey, PHP_STREAM_COPY_ALL, 0)) || !pubkey) { #endif if (pfp) { php_stream_close(pfp); } if (error) { spprintf(error, 0, "openssl public key could not be read"); } return FAILURE; } php_stream_close(pfp); #ifndef PHAR_HAVE_OPENSSL tempsig = sig_len; if (FAILURE == phar_call_openssl_signverify(0, fp, end_of_phar, pubkey, pubkey_len, &sig, &tempsig TSRMLS_CC)) { if (pubkey) { efree(pubkey); } if (error) { spprintf(error, 0, "openssl signature could not be verified"); } return FAILURE; } if (pubkey) { efree(pubkey); } sig_len = tempsig; #else in = BIO_new_mem_buf(pubkey, pubkey_len); if (NULL == in) { efree(pubkey); if (error) { spprintf(error, 0, "openssl signature could not be processed"); } return FAILURE; } key = PEM_read_bio_PUBKEY(in, NULL,NULL, NULL); BIO_free(in); efree(pubkey); if (NULL == key) { if (error) { spprintf(error, 0, "openssl signature could not be processed"); } return FAILURE; } EVP_VerifyInit(&md_ctx, mdtype); read_len = end_of_phar; if (read_len > sizeof(buf)) { read_size = sizeof(buf); } else { read_size = (int)read_len; } php_stream_seek(fp, 0, SEEK_SET); while (read_size && (len = php_stream_read(fp, (char*)buf, read_size)) > 0) { EVP_VerifyUpdate (&md_ctx, buf, len); read_len -= (off_t)len; if (read_len < read_size) { read_size = (int)read_len; } } if (EVP_VerifyFinal(&md_ctx, (unsigned char *)sig, sig_len, key) != 1) { /* 1: signature verified, 0: signature does not match, -1: failed signature operation */ EVP_MD_CTX_cleanup(&md_ctx); if (error) { spprintf(error, 0, "broken openssl signature"); } return FAILURE; } EVP_MD_CTX_cleanup(&md_ctx); #endif *signature_len = phar_hex_str((const char*)sig, sig_len, signature TSRMLS_CC); } break; #ifdef PHAR_HASH_OK case PHAR_SIG_SHA512: { unsigned char digest[64]; PHP_SHA512_CTX context; PHP_SHA512Init(&context); read_len = end_of_phar; if (read_len > sizeof(buf)) { read_size = sizeof(buf); } else { read_size = (int)read_len; } while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) { PHP_SHA512Update(&context, buf, len); read_len -= (off_t)len; if (read_len < read_size) { read_size = (int)read_len; } } PHP_SHA512Final(digest, &context); if (memcmp(digest, sig, sizeof(digest))) { if (error) { spprintf(error, 0, "broken signature"); } return FAILURE; } *signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature TSRMLS_CC); break; } case PHAR_SIG_SHA256: { unsigned char digest[32]; PHP_SHA256_CTX context; PHP_SHA256Init(&context); read_len = end_of_phar; if (read_len > sizeof(buf)) { read_size = sizeof(buf); } else { read_size = (int)read_len; } while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) { PHP_SHA256Update(&context, buf, len); read_len -= (off_t)len; if (read_len < read_size) { read_size = (int)read_len; } } PHP_SHA256Final(digest, &context); if (memcmp(digest, sig, sizeof(digest))) { if (error) { spprintf(error, 0, "broken signature"); } return FAILURE; } *signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature TSRMLS_CC); break; } #else case PHAR_SIG_SHA512: case PHAR_SIG_SHA256: if (error) { spprintf(error, 0, "unsupported signature"); } return FAILURE; #endif case PHAR_SIG_SHA1: { unsigned char digest[20]; PHP_SHA1_CTX 
context; PHP_SHA1Init(&context); read_len = end_of_phar; if (read_len > sizeof(buf)) { read_size = sizeof(buf); } else { read_size = (int)read_len; } while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) { PHP_SHA1Update(&context, buf, len); read_len -= (off_t)len; if (read_len < read_size) { read_size = (int)read_len; } } PHP_SHA1Final(digest, &context); if (memcmp(digest, sig, sizeof(digest))) { if (error) { spprintf(error, 0, "broken signature"); } return FAILURE; } *signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature TSRMLS_CC); break; } case PHAR_SIG_MD5: { unsigned char digest[16]; PHP_MD5_CTX context; PHP_MD5Init(&context); read_len = end_of_phar; if (read_len > sizeof(buf)) { read_size = sizeof(buf); } else { read_size = (int)read_len; } while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) { PHP_MD5Update(&context, buf, len); read_len -= (off_t)len; if (read_len < read_size) { read_size = (int)read_len; } } PHP_MD5Final(digest, &context); if (memcmp(digest, sig, sizeof(digest))) { if (error) { spprintf(error, 0, "broken signature"); } return FAILURE; } *signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature TSRMLS_CC); break; } default: if (error) { spprintf(error, 0, "broken or unsupported signature"); } return FAILURE; } return SUCCESS; } /* }}} */
164,573
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) { if (!vcpu->arch.time_page) return -EINVAL; vcpu->arch.pvclock_set_guest_stopped_request = true; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); return 0; } Commit Message: KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache functions (CVE-2013-1797) There is a potential use after free issue with the handling of MSR_KVM_SYSTEM_TIME. If the guest specifies a GPA in a movable or removable memory such as frame buffers then KVM might continue to write to that address even after it's removed via KVM_SET_USER_MEMORY_REGION. KVM pins the page in memory so it's unlikely to cause an issue, but if the user space component re-purposes the memory previously used for the guest, then the guest will be able to corrupt that memory. Tested: Tested against kvmclock unit test Signed-off-by: Andrew Honig <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]> CWE ID: CWE-399
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) { if (!vcpu->arch.pv_time_enabled) return -EINVAL; vcpu->arch.pvclock_set_guest_stopped_request = true; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); return 0; }
166,117
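The use-after-free described in the commit message above comes from caching a raw host mapping of guest memory that userspace can later remap. The safer shape of the fix, re-resolving through a cache that is invalidated whenever the memory layout changes, can be sketched in a few lines of plain C. Every name and type here is hypothetical; this is not KVM's gfn_to_hva_cache implementation.

#include <stddef.h>
#include <string.h>

/* Hypothetical guest "RAM" and a generation counter bumped whenever the
   guest-physical to host-virtual layout changes (memslot update, remap). */
static unsigned char guest_ram[4096];
static unsigned int map_generation;

struct gpa_cache {
    size_t        gpa;         /* guest-physical offset being cached */
    void         *hva;         /* cached host pointer, NULL when stale */
    unsigned int  generation;  /* layout generation it was resolved for */
};

/* Re-resolve instead of trusting a pointer pinned once at setup time. */
static void *gpa_cache_resolve(struct gpa_cache *c, size_t len)
{
    if (c->hva != NULL && c->generation == map_generation)
        return c->hva;
    if (len > sizeof(guest_ram) || c->gpa > sizeof(guest_ram) - len)
        return NULL;                       /* not (or no longer) mapped */
    c->hva = guest_ram + c->gpa;
    c->generation = map_generation;
    return c->hva;
}

static int write_guest_record(struct gpa_cache *c, const void *buf, size_t len)
{
    void *dst = gpa_cache_resolve(c, len);
    if (dst == NULL)
        return -1;    /* fail cleanly instead of writing through a stale map */
    memcpy(dst, buf, len);
    return 0;
}

A layout change only has to bump map_generation; the next write either re-validates the mapping or fails, and never touches the old one.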
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool RenderFrameDevToolsAgentHost::AttachSession(DevToolsSession* session) { if (session->restricted() && !IsFrameHostAllowedForRestrictedSessions()) return false; session->SetRenderer(frame_host_ ? frame_host_->GetProcess()->GetID() : ChildProcessHost::kInvalidUniqueID, frame_host_); protocol::EmulationHandler* emulation_handler = new protocol::EmulationHandler(); session->AddHandler(base::WrapUnique(new protocol::BrowserHandler())); session->AddHandler(base::WrapUnique(new protocol::DOMHandler())); session->AddHandler(base::WrapUnique(emulation_handler)); session->AddHandler(base::WrapUnique(new protocol::InputHandler())); session->AddHandler(base::WrapUnique(new protocol::InspectorHandler())); session->AddHandler(base::WrapUnique(new protocol::IOHandler( GetIOContext()))); session->AddHandler(base::WrapUnique(new protocol::MemoryHandler())); session->AddHandler(base::WrapUnique(new protocol::NetworkHandler(GetId()))); session->AddHandler(base::WrapUnique(new protocol::SchemaHandler())); session->AddHandler(base::WrapUnique(new protocol::ServiceWorkerHandler())); session->AddHandler(base::WrapUnique(new protocol::StorageHandler())); session->AddHandler( base::WrapUnique(new protocol::TargetHandler(false /* browser_only */))); session->AddHandler(base::WrapUnique(new protocol::TracingHandler( protocol::TracingHandler::Renderer, frame_tree_node_ ? frame_tree_node_->frame_tree_node_id() : 0, GetIOContext()))); session->AddHandler( base::WrapUnique(new protocol::PageHandler(emulation_handler))); session->AddHandler(base::WrapUnique(new protocol::SecurityHandler())); if (EnsureAgent()) session->AttachToAgent(agent_ptr_); if (sessions().size() == 1) { if (!base::FeatureList::IsEnabled(features::kVizDisplayCompositor) && !base::FeatureList::IsEnabled( features::kUseVideoCaptureApiForDevToolsSnapshots)) { frame_trace_recorder_.reset(new DevToolsFrameTraceRecorder()); } GrantPolicy(); #if defined(OS_ANDROID) GetWakeLock()->RequestWakeLock(); #endif } return true; } Commit Message: [DevTools] Do not create target handler for restricted sessions Bug: 805224 Change-Id: I08528e44e79d0a097cfe72ab4949dda538efd098 Reviewed-on: https://chromium-review.googlesource.com/988695 Reviewed-by: Pavel Feldman <[email protected]> Commit-Queue: Dmitry Gozman <[email protected]> Cr-Commit-Position: refs/heads/master@{#547496} CWE ID: CWE-20
bool RenderFrameDevToolsAgentHost::AttachSession(DevToolsSession* session) { if (session->restricted() && !IsFrameHostAllowedForRestrictedSessions()) return false; session->SetRenderer(frame_host_ ? frame_host_->GetProcess()->GetID() : ChildProcessHost::kInvalidUniqueID, frame_host_); protocol::EmulationHandler* emulation_handler = new protocol::EmulationHandler(); session->AddHandler(base::WrapUnique(new protocol::BrowserHandler())); session->AddHandler(base::WrapUnique(new protocol::DOMHandler())); session->AddHandler(base::WrapUnique(emulation_handler)); session->AddHandler(base::WrapUnique(new protocol::InputHandler())); session->AddHandler(base::WrapUnique(new protocol::InspectorHandler())); session->AddHandler(base::WrapUnique(new protocol::IOHandler( GetIOContext()))); session->AddHandler(base::WrapUnique(new protocol::MemoryHandler())); session->AddHandler(base::WrapUnique(new protocol::NetworkHandler(GetId()))); session->AddHandler(base::WrapUnique(new protocol::SchemaHandler())); session->AddHandler(base::WrapUnique(new protocol::ServiceWorkerHandler())); session->AddHandler(base::WrapUnique(new protocol::StorageHandler())); if (!session->restricted()) { session->AddHandler(base::WrapUnique( new protocol::TargetHandler(false /* browser_only */))); } session->AddHandler(base::WrapUnique(new protocol::TracingHandler( protocol::TracingHandler::Renderer, frame_tree_node_ ? frame_tree_node_->frame_tree_node_id() : 0, GetIOContext()))); session->AddHandler( base::WrapUnique(new protocol::PageHandler(emulation_handler))); session->AddHandler(base::WrapUnique(new protocol::SecurityHandler())); if (EnsureAgent()) session->AttachToAgent(agent_ptr_); if (sessions().size() == 1) { if (!base::FeatureList::IsEnabled(features::kVizDisplayCompositor) && !base::FeatureList::IsEnabled( features::kUseVideoCaptureApiForDevToolsSnapshots)) { frame_trace_recorder_.reset(new DevToolsFrameTraceRecorder()); } GrantPolicy(); #if defined(OS_ANDROID) GetWakeLock()->RequestWakeLock(); #endif } return true; }
173,235
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long) ctx; struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *) &skb->data[A]; if (nla->nla_len > A - skb->len) return 0; nla = nla_find_nested(nla, X); if (nla) return (void *) nla - (void *) skb->data; return 0; } Commit Message: filter: prevent nla extensions to peek beyond the end of the message The BPF_S_ANC_NLATTR and BPF_S_ANC_NLATTR_NEST extensions fail to check for a minimal message length before testing the supplied offset to be within the bounds of the message. This allows the subtraction of the nla header to underflow and therefore -- as the data type is unsigned -- allowing far to big offset and length values for the search of the netlink attribute. The remainder calculation for the BPF_S_ANC_NLATTR_NEST extension is also wrong. It has the minuend and subtrahend mixed up, therefore calculates a huge length value, allowing to overrun the end of the message while looking for the netlink attribute. The following three BPF snippets will trigger the bugs when attached to a UNIX datagram socket and parsing a message with length 1, 2 or 3. ,-[ PoC for missing size check in BPF_S_ANC_NLATTR ]-- | ld #0x87654321 | ldx #42 | ld #nla | ret a `--- ,-[ PoC for the same bug in BPF_S_ANC_NLATTR_NEST ]-- | ld #0x87654321 | ldx #42 | ld #nlan | ret a `--- ,-[ PoC for wrong remainder calculation in BPF_S_ANC_NLATTR_NEST ]-- | ; (needs a fake netlink header at offset 0) | ld #0 | ldx #42 | ld #nlan | ret a `--- Fix the first issue by ensuring the message length fulfills the minimal size constrains of a nla header. Fix the second bug by getting the math for the remainder calculation right. Fixes: 4738c1db15 ("[SKFILTER]: Add SKF_ADF_NLATTR instruction") Fixes: d214c7537b ("filter: add SKF_AD_NLATTR_NEST to look for nested..") Cc: Patrick McHardy <[email protected]> Cc: Pablo Neira Ayuso <[email protected]> Signed-off-by: Mathias Krause <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-189
static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long) ctx; struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (skb->len < sizeof(struct nlattr)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *) &skb->data[A]; if (nla->nla_len > skb->len - A) return 0; nla = nla_find_nested(nla, X); if (nla) return (void *) nla - (void *) skb->data; return 0; }
166,384
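The commit message above points out two classic parser mistakes: subtracting a header size from an unsigned length that may be smaller than the header (which wraps around), and swapping minuend and subtrahend in the remainder check. A self-contained sketch of the corrected ordering, using an invented TLV header rather than the kernel's struct nlattr:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical TLV header located at some offset inside a message buffer. */
struct attr_hdr {
    uint16_t len;   /* total length of the attribute, header included */
    uint16_t type;
};

/* Return a pointer to the attribute at 'off', or NULL if it cannot fit.
   Comparisons are ordered so no unsigned subtraction can wrap around. */
static const struct attr_hdr *attr_at(const uint8_t *msg, size_t msg_len,
                                      size_t off)
{
    const struct attr_hdr *a;

    if (msg_len < sizeof(*a))            /* message too short for any header */
        return NULL;
    if (off > msg_len - sizeof(*a))      /* header would run past the end */
        return NULL;

    a = (const struct attr_hdr *)(msg + off);
    if (a->len < sizeof(*a))             /* malformed: shorter than its header */
        return NULL;
    if (a->len > msg_len - off)          /* remainder check, right way round */
        return NULL;
    return a;
}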
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: push_decoder_state (DECODER_STATE ds) { if (ds->idx >= ds->stacksize) { fprintf (stderr, "ERROR: decoder stack overflow!\n"); abort (); } ds->stack[ds->idx++] = ds->cur; } Commit Message: CWE ID: CWE-20
push_decoder_state (DECODER_STATE ds) { if (ds->idx >= ds->stacksize) { fprintf (stderr, "ksba: ber-decoder: stack overflow!\n"); return gpg_error (GPG_ERR_LIMIT_REACHED); } ds->stack[ds->idx++] = ds->cur; return 0; }
165,052
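The fix above turns a hard abort() on a full decoder stack into an ordinary error return, which is the right call when the nesting depth is controlled by untrusted input rather than by a programming mistake. A tiny sketch of that shape, with a made-up stack type and error code:

#include <stdio.h>

#define ERR_LIMIT_REACHED 77   /* stand-in for the library's error code */

struct parse_stack {
    int items[32];
    int idx;
    int size;   /* number of usable slots, here at most 32 */
};

/* Library code should report "input nests too deeply" to the caller instead
   of calling abort(): the depth is attacker-controlled, not a logic bug. */
static int push_state(struct parse_stack *s, int value)
{
    if (s->idx >= s->size) {
        fprintf(stderr, "parser: nesting limit reached\n");
        return ERR_LIMIT_REACHED;
    }
    s->items[s->idx++] = value;
    return 0;
}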
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: white_point(PNG_CONST color_encoding *encoding) { CIE_color white; white.X = encoding->red.X + encoding->green.X + encoding->blue.X; white.Y = encoding->red.Y + encoding->green.Y + encoding->blue.Y; white.Z = encoding->red.Z + encoding->green.Z + encoding->blue.Z; return white; } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
white_point(const color_encoding *encoding) { CIE_color white; white.X = encoding->red.X + encoding->green.X + encoding->blue.X; white.Y = encoding->red.Y + encoding->green.Y + encoding->blue.Y; white.Z = encoding->red.Z + encoding->green.Z + encoding->blue.Z; return white; }
173,718
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void request_key_auth_describe(const struct key *key, struct seq_file *m) { struct request_key_auth *rka = key->payload.data[0]; seq_puts(m, "key:"); seq_puts(m, key->description); if (key_is_instantiated(key)) seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); } Commit Message: KEYS: Fix race between updating and finding a negative key Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection error into one field such that: (1) The instantiation state can be modified/read atomically. (2) The error can be accessed atomically with the state. (3) The error isn't stored unioned with the payload pointers. This deals with the problem that the state is spread over three different objects (two bits and a separate variable) and reading or updating them atomically isn't practical, given that not only can uninstantiated keys change into instantiated or rejected keys, but rejected keys can also turn into instantiated keys - and someone accessing the key might not be using any locking. The main side effect of this problem is that what was held in the payload may change, depending on the state. For instance, you might observe the key to be in the rejected state. You then read the cached error, but if the key semaphore wasn't locked, the key might've become instantiated between the two reads - and you might now have something in hand that isn't actually an error code. The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error code if the key is negatively instantiated. The key_is_instantiated() function is replaced with key_is_positive() to avoid confusion as negative keys are also 'instantiated'. Additionally, barriering is included: (1) Order payload-set before state-set during instantiation. (2) Order state-read before payload-read when using the key. Further separate barriering is necessary if RCU is being used to access the payload content after reading the payload pointers. Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data") Cc: [email protected] # v4.4+ Reported-by: Eric Biggers <[email protected]> Signed-off-by: David Howells <[email protected]> Reviewed-by: Eric Biggers <[email protected]> CWE ID: CWE-20
static void request_key_auth_describe(const struct key *key, struct seq_file *m) { struct request_key_auth *rka = key->payload.data[0]; seq_puts(m, "key:"); seq_puts(m, key->description); if (key_is_positive(key)) seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); }
167,707
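The commit message above collapses the instantiated/negative flags and the rejection error into a single word so that lock-free readers see a consistent state, with ordering enforced by a barrier on each side. The publish/consume idea can be sketched with C11 atomics on a toy object; this is not the kernel's struct key or its barrier helpers.

#include <stdatomic.h>
#include <stddef.h>

enum { OBJ_UNINSTANTIATED = 0, OBJ_POSITIVE = 1 };  /* negative values: -errno */

struct obj {
    void *payload;
    _Atomic int state;   /* one word: uninstantiated, positive, or an error */
};

/* Writer: make the payload visible before the state that advertises it. */
static void obj_instantiate(struct obj *o, void *payload)
{
    o->payload = payload;
    atomic_store_explicit(&o->state, OBJ_POSITIVE, memory_order_release);
}

/* Reader: check the state first; the acquire pairs with the release above,
   so a positive state guarantees the payload write is already visible. */
static void *obj_get_payload(struct obj *o)
{
    int s = atomic_load_explicit(&o->state, memory_order_acquire);
    if (s != OBJ_POSITIVE)
        return NULL;     /* uninstantiated, or negatively instantiated */
    return o->payload;
}

Because state and error share one word, a reader can never observe "rejected" and then pick up something that is no longer an error code.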
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *addr = msg->msg_name; u32 dst_pid; u32 dst_group; struct sk_buff *skb; int err; struct scm_cookie scm; if (msg->msg_flags&MSG_OOB) return -EOPNOTSUPP; if (NULL == siocb->scm) siocb->scm = &scm; err = scm_send(sock, msg, siocb->scm, true); if (err < 0) return err; if (msg->msg_namelen) { err = -EINVAL; if (addr->nl_family != AF_NETLINK) goto out; dst_pid = addr->nl_pid; dst_group = ffs(addr->nl_groups); err = -EPERM; if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) goto out; } else { dst_pid = nlk->dst_pid; dst_group = nlk->dst_group; } if (!nlk->pid) { err = netlink_autobind(sock); if (err) goto out; } err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; err = -ENOBUFS; skb = alloc_skb(len, GFP_KERNEL); if (skb == NULL) goto out; NETLINK_CB(skb).pid = nlk->pid; NETLINK_CB(skb).dst_group = dst_group; memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); err = -EFAULT; if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { kfree_skb(skb); goto out; } err = security_netlink_send(sk, skb); if (err) { kfree_skb(skb); goto out; } if (dst_group) { atomic_inc(&skb->users); netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL); } err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT); out: scm_destroy(siocb->scm); return err; } Commit Message: netlink: fix possible spoofing from non-root processes Non-root user-space processes can send Netlink messages to other processes that are well-known for being subscribed to Netlink asynchronous notifications. This allows ilegitimate non-root process to send forged messages to Netlink subscribers. The userspace process usually verifies the legitimate origin in two ways: a) Socket credentials. If UID != 0, then the message comes from some ilegitimate process and the message needs to be dropped. b) Netlink portID. In general, portID == 0 means that the origin of the messages comes from the kernel. Thus, discarding any message not coming from the kernel. However, ctnetlink sets the portID in event messages that has been triggered by some user-space process, eg. conntrack utility. So other processes subscribed to ctnetlink events, eg. conntrackd, know that the event was triggered by some user-space action. Neither of the two ways to discard ilegitimate messages coming from non-root processes can help for ctnetlink. This patch adds capability validation in case that dst_pid is set in netlink_sendmsg(). This approach is aggressive since existing applications using any Netlink bus to deliver messages between two user-space processes will break. Note that the exception is NETLINK_USERSOCK, since it is reserved for netlink-to-netlink userspace communication. Still, if anyone wants that his Netlink bus allows netlink-to-netlink userspace, then they can set NL_NONROOT_SEND. However, by default, I don't think it makes sense to allow to use NETLINK_ROUTE to communicate two processes that are sending no matter what information that is not related to link/neighbouring/routing. They should be using NETLINK_USERSOCK instead for that. Signed-off-by: Pablo Neira Ayuso <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-284
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *addr = msg->msg_name; u32 dst_pid; u32 dst_group; struct sk_buff *skb; int err; struct scm_cookie scm; if (msg->msg_flags&MSG_OOB) return -EOPNOTSUPP; if (NULL == siocb->scm) siocb->scm = &scm; err = scm_send(sock, msg, siocb->scm, true); if (err < 0) return err; if (msg->msg_namelen) { err = -EINVAL; if (addr->nl_family != AF_NETLINK) goto out; dst_pid = addr->nl_pid; dst_group = ffs(addr->nl_groups); err = -EPERM; if ((dst_group || dst_pid) && !netlink_capable(sock, NL_NONROOT_SEND)) goto out; } else { dst_pid = nlk->dst_pid; dst_group = nlk->dst_group; } if (!nlk->pid) { err = netlink_autobind(sock); if (err) goto out; } err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; err = -ENOBUFS; skb = alloc_skb(len, GFP_KERNEL); if (skb == NULL) goto out; NETLINK_CB(skb).pid = nlk->pid; NETLINK_CB(skb).dst_group = dst_group; memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); err = -EFAULT; if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { kfree_skb(skb); goto out; } err = security_netlink_send(sk, skb); if (err) { kfree_skb(skb); goto out; } if (dst_group) { atomic_inc(&skb->users); netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL); } err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT); out: scm_destroy(siocb->scm); return err; }
167,615
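The essence of the fix above is a broader privilege gate: a unicast destination pid is now checked the same way as a multicast group, so an unprivileged local process can no longer forge messages that subscribers attribute to the kernel. A minimal sketch of that gate, with a stand-in privilege test rather than the kernel's capability machinery:

#include <stdbool.h>

struct dest { unsigned int pid; unsigned int group; };

/* Stand-in for "does this sender hold the socket's send privilege?" */
static bool sender_may_send_addressed(void) { return false; }

/* Any explicitly addressed send, multicast or unicast, needs the privilege;
   only the unaddressed (already connected) case is exempt. */
static int check_dest(const struct dest *d)
{
    if ((d->group || d->pid) && !sender_may_send_addressed())
        return -1;   /* -EPERM in the original */
    return 0;
}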
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void BrowserViewRenderer::ScrollTo(gfx::Vector2d scroll_offset) { gfx::Vector2d max_offset = max_scroll_offset(); gfx::Vector2dF scroll_offset_dip; if (max_offset.x()) { scroll_offset_dip.set_x((scroll_offset.x() * max_scroll_offset_dip_.x()) / max_offset.x()); } if (max_offset.y()) { scroll_offset_dip.set_y((scroll_offset.y() * max_scroll_offset_dip_.y()) / max_offset.y()); } DCHECK_LE(0.f, scroll_offset_dip.x()); DCHECK_LE(0.f, scroll_offset_dip.y()); DCHECK(scroll_offset_dip.x() < max_scroll_offset_dip_.x() || scroll_offset_dip.x() - max_scroll_offset_dip_.x() < kEpsilon) << scroll_offset_dip.x() << " " << max_scroll_offset_dip_.x(); DCHECK(scroll_offset_dip.y() < max_scroll_offset_dip_.y() || scroll_offset_dip.y() - max_scroll_offset_dip_.y() < kEpsilon) << scroll_offset_dip.y() << " " << max_scroll_offset_dip_.y(); if (scroll_offset_dip_ == scroll_offset_dip) return; scroll_offset_dip_ = scroll_offset_dip; TRACE_EVENT_INSTANT2("android_webview", "BrowserViewRenderer::ScrollTo", TRACE_EVENT_SCOPE_THREAD, "x", scroll_offset_dip.x(), "y", scroll_offset_dip.y()); if (compositor_) { compositor_->DidChangeRootLayerScrollOffset( gfx::ScrollOffset(scroll_offset_dip_)); } } Commit Message: sync compositor: pass simple gfx types by const ref See bug for reasoning BUG=159273 Review URL: https://codereview.chromium.org/1417893006 Cr-Commit-Position: refs/heads/master@{#356653} CWE ID: CWE-399
void BrowserViewRenderer::ScrollTo(const gfx::Vector2d& scroll_offset) { gfx::Vector2d max_offset = max_scroll_offset(); gfx::Vector2dF scroll_offset_dip; if (max_offset.x()) { scroll_offset_dip.set_x((scroll_offset.x() * max_scroll_offset_dip_.x()) / max_offset.x()); } if (max_offset.y()) { scroll_offset_dip.set_y((scroll_offset.y() * max_scroll_offset_dip_.y()) / max_offset.y()); } DCHECK_LE(0.f, scroll_offset_dip.x()); DCHECK_LE(0.f, scroll_offset_dip.y()); DCHECK(scroll_offset_dip.x() < max_scroll_offset_dip_.x() || scroll_offset_dip.x() - max_scroll_offset_dip_.x() < kEpsilon) << scroll_offset_dip.x() << " " << max_scroll_offset_dip_.x(); DCHECK(scroll_offset_dip.y() < max_scroll_offset_dip_.y() || scroll_offset_dip.y() - max_scroll_offset_dip_.y() < kEpsilon) << scroll_offset_dip.y() << " " << max_scroll_offset_dip_.y(); if (scroll_offset_dip_ == scroll_offset_dip) return; scroll_offset_dip_ = scroll_offset_dip; TRACE_EVENT_INSTANT2("android_webview", "BrowserViewRenderer::ScrollTo", TRACE_EVENT_SCOPE_THREAD, "x", scroll_offset_dip.x(), "y", scroll_offset_dip.y()); if (compositor_) { compositor_->DidChangeRootLayerScrollOffset( gfx::ScrollOffset(scroll_offset_dip_)); } }
171,614
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: asn1_get_bit_der (const unsigned char *der, int der_len, int *ret_len, unsigned char *str, int str_size, int *bit_len) { int len_len, len_byte; if (der_len <= 0) return ASN1_GENERIC_ERROR; len_byte = asn1_get_length_der (der, der_len, &len_len) - 1; if (len_byte < 0) return ASN1_DER_ERROR; *ret_len = len_byte + len_len + 1; *bit_len = len_byte * 8 - der[len_len]; if (str_size >= len_byte) memcpy (str, der + len_len + 1, len_byte); } Commit Message: CWE ID: CWE-189
asn1_get_bit_der (const unsigned char *der, int der_len, int *ret_len, unsigned char *str, int str_size, int *bit_len) { int len_len = 0, len_byte; if (der_len <= 0) return ASN1_GENERIC_ERROR; len_byte = asn1_get_length_der (der, der_len, &len_len) - 1; if (len_byte < 0) return ASN1_DER_ERROR; *ret_len = len_byte + len_len + 1; *bit_len = len_byte * 8 - der[len_len]; if (*bit_len <= 0) return ASN1_DER_ERROR; if (str_size >= len_byte) memcpy (str, der + len_len + 1, len_byte); }
165,177
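Two habits are visible in the fix above: initialize the output length before calling a helper that may fill it only partially, and reject a derived length (payload bits minus unused bits) that comes out non-positive before it is ever used. A compact sketch with an invented, simplified encoding rather than real DER:

#include <stddef.h>
#include <string.h>

/* Decode a BIT STRING-like blob: [1-byte payload length][unused-bit count][bits].
   Returns 0 on success, -1 on malformed input; 'out' must hold out_size bytes. */
static int get_bits(const unsigned char *der, size_t der_len,
                    unsigned char *out, size_t out_size, int *bit_len)
{
    size_t payload;
    int unused;

    *bit_len = 0;                        /* defined even on the error paths */
    if (der_len < 2)
        return -1;
    payload = der[0];                    /* toy one-byte length field */
    if (payload < 1 || payload > der_len - 1)
        return -1;                       /* must at least cover the unused-bit octet */
    unused = der[1];
    if (unused > 7)
        return -1;
    *bit_len = (int)((payload - 1) * 8) - unused;
    if (*bit_len <= 0)
        return -1;                       /* derived length must stay positive */
    if (payload - 1 > out_size)
        return -1;
    memcpy(out, der + 2, payload - 1);
    return 0;
}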
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: const Chapters::Atom* Chapters::Edition::GetAtom(int index) const { if (index < 0) return NULL; if (index >= m_atoms_count) return NULL; return m_atoms + index; } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
const Chapters::Display* Chapters::Atom::GetDisplay(int index) const { if (index < 0) return NULL; if (index >= m_displays_count) return NULL; return m_displays + index; }
174,281
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: get_strings_2_svc(gstrings_arg *arg, struct svc_req *rqstp) { static gstrings_ret ret; char *prime_arg; gss_buffer_desc client_name, service_name; OM_uint32 minor_stat; kadm5_server_handle_t handle; const char *errmsg = NULL; xdr_free(xdr_gstrings_ret, &ret); if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle))) goto exit_func; if ((ret.code = check_handle((void *)handle))) goto exit_func; ret.api_version = handle->api_version; if (setup_gss_names(rqstp, &client_name, &service_name) < 0) { ret.code = KADM5_FAILURE; goto exit_func; } if (krb5_unparse_name(handle->context, arg->princ, &prime_arg)) { ret.code = KADM5_BAD_PRINCIPAL; goto exit_func; } if (! cmp_gss_krb5_name(handle, rqst2name(rqstp), arg->princ) && (CHANGEPW_SERVICE(rqstp) || !kadm5int_acl_check(handle->context, rqst2name(rqstp), ACL_INQUIRE, arg->princ, NULL))) { ret.code = KADM5_AUTH_GET; log_unauth("kadm5_get_strings", prime_arg, &client_name, &service_name, rqstp); } else { ret.code = kadm5_get_strings((void *)handle, arg->princ, &ret.strings, &ret.count); if (ret.code != 0) errmsg = krb5_get_error_message(handle->context, ret.code); log_done("kadm5_get_strings", prime_arg, errmsg, &client_name, &service_name, rqstp); if (errmsg != NULL) krb5_free_error_message(handle->context, errmsg); } free(prime_arg); gss_release_buffer(&minor_stat, &client_name); gss_release_buffer(&minor_stat, &service_name); exit_func: free_server_handle(handle); return &ret; } Commit Message: Fix leaks in kadmin server stubs [CVE-2015-8631] In each kadmind server stub, initialize the client_name and server_name variables, and release them in the cleanup handler. Many of the stubs will otherwise leak the client and server name if krb5_unparse_name() fails. Also make sure to free the prime_arg variables in rename_principal_2_svc(), or we can leak the first one if unparsing the second one fails. Discovered by Simo Sorce. CVE-2015-8631: In all versions of MIT krb5, an authenticated attacker can cause kadmind to leak memory by supplying a null principal name in a request which uses one. Repeating these requests will eventually cause kadmind to exhaust all available memory. CVSSv2 Vector: AV:N/AC:L/Au:S/C:N/I:N/A:C/E:POC/RL:OF/RC:C ticket: 8343 (new) target_version: 1.14-next target_version: 1.13-next tags: pullup CWE ID: CWE-119
get_strings_2_svc(gstrings_arg *arg, struct svc_req *rqstp) { static gstrings_ret ret; char *prime_arg; gss_buffer_desc client_name = GSS_C_EMPTY_BUFFER; gss_buffer_desc service_name = GSS_C_EMPTY_BUFFER; OM_uint32 minor_stat; kadm5_server_handle_t handle; const char *errmsg = NULL; xdr_free(xdr_gstrings_ret, &ret); if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle))) goto exit_func; if ((ret.code = check_handle((void *)handle))) goto exit_func; ret.api_version = handle->api_version; if (setup_gss_names(rqstp, &client_name, &service_name) < 0) { ret.code = KADM5_FAILURE; goto exit_func; } if (krb5_unparse_name(handle->context, arg->princ, &prime_arg)) { ret.code = KADM5_BAD_PRINCIPAL; goto exit_func; } if (! cmp_gss_krb5_name(handle, rqst2name(rqstp), arg->princ) && (CHANGEPW_SERVICE(rqstp) || !kadm5int_acl_check(handle->context, rqst2name(rqstp), ACL_INQUIRE, arg->princ, NULL))) { ret.code = KADM5_AUTH_GET; log_unauth("kadm5_get_strings", prime_arg, &client_name, &service_name, rqstp); } else { ret.code = kadm5_get_strings((void *)handle, arg->princ, &ret.strings, &ret.count); if (ret.code != 0) errmsg = krb5_get_error_message(handle->context, ret.code); log_done("kadm5_get_strings", prime_arg, errmsg, &client_name, &service_name, rqstp); if (errmsg != NULL) krb5_free_error_message(handle->context, errmsg); } free(prime_arg); exit_func: gss_release_buffer(&minor_stat, &client_name); gss_release_buffer(&minor_stat, &service_name); free_server_handle(handle); return &ret; }
167,518
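The leak fix above follows a standard C idiom: give every resource a harmless initial value (GSS_C_EMPTY_BUFFER in the original), then release everything from a single cleanup label that every exit path reaches. A generic sketch of the idiom with stand-in resources instead of GSS-API buffers:

#include <stdlib.h>
#include <string.h>

/* Stand-in "resource" so the cleanup path can always run safely. */
struct buffer { char *data; };

static void buffer_release(struct buffer *b)
{
    free(b->data);             /* free(NULL) is defined to do nothing */
    b->data = NULL;
}

static int buffer_set(struct buffer *b, const char *s)
{
    size_t n = strlen(s) + 1;
    b->data = malloc(n);
    if (b->data == NULL)
        return -1;
    memcpy(b->data, s, n);
    return 0;
}

static int do_request(const char *name)
{
    /* Harmless initial values: releasing them is safe even if we fail
       before they are ever filled in. */
    struct buffer client  = { NULL };
    struct buffer service = { NULL };
    int ret = -1;

    if (buffer_set(&client, name) != 0)
        goto cleanup;
    if (buffer_set(&service, "service") != 0)
        goto cleanup;              /* client is still released below */

    /* ... the real work would go here ... */
    ret = 0;

cleanup:
    buffer_release(&client);
    buffer_release(&service);
    return ret;
}

With one exit path there is no way to add an early return later that forgets a release, which is exactly the class of leak the commit closes.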
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr = msg->msg_name; struct sock *other = NULL; int namelen = 0; /* fake GCC */ int err; unsigned int hash; struct sk_buff *skb; long timeo; struct scm_cookie tmp_scm; int max_level; int data_len = 0; if (NULL == siocb->scm) siocb->scm = &tmp_scm; wait_for_unix_gc(); err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out; if (msg->msg_namelen) { err = unix_mkname(sunaddr, msg->msg_namelen, &hash); if (err < 0) goto out; namelen = err; } else { sunaddr = NULL; err = -ENOTCONN; other = unix_peer_get(sk); if (!other) goto out; } if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && (err = unix_autobind(sock)) != 0) goto out; err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; if (len > SKB_MAX_ALLOC) data_len = min_t(size_t, len - SKB_MAX_ALLOC, MAX_SKB_FRAGS * PAGE_SIZE); skb = sock_alloc_send_pskb(sk, len - data_len, data_len, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; err = unix_scm_to_skb(siocb->scm, skb, true); if (err < 0) goto out_free; max_level = err + 1; unix_get_secdata(siocb->scm, skb); skb_put(skb, len - data_len); skb->data_len = data_len; skb->len = len; err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len); if (err) goto out_free; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); restart: if (!other) { err = -ECONNRESET; if (sunaddr == NULL) goto out_free; other = unix_find_other(net, sunaddr, namelen, sk->sk_type, hash, &err); if (other == NULL) goto out_free; } if (sk_filter(other, skb) < 0) { /* Toss the packet but do not return any error to the sender */ err = len; goto out_free; } unix_state_lock(other); err = -EPERM; if (!unix_may_send(sk, other)) goto out_unlock; if (sock_flag(other, SOCK_DEAD)) { /* * Check with 1003.1g - what should * datagram error */ unix_state_unlock(other); sock_put(other); err = 0; unix_state_lock(sk); if (unix_peer(sk) == other) { unix_peer(sk) = NULL; unix_state_unlock(sk); unix_dgram_disconnected(sk, other); sock_put(other); err = -ECONNREFUSED; } else { unix_state_unlock(sk); } other = NULL; if (err) goto out_free; goto restart; } err = -EPIPE; if (other->sk_shutdown & RCV_SHUTDOWN) goto out_unlock; if (sk->sk_type != SOCK_SEQPACKET) { err = security_unix_may_send(sk->sk_socket, other->sk_socket); if (err) goto out_unlock; } if (unix_peer(other) != sk && unix_recvq_full(other)) { if (!timeo) { err = -EAGAIN; goto out_unlock; } timeo = unix_wait_for_peer(other, timeo); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out_free; goto restart; } if (sock_flag(other, SOCK_RCVTSTAMP)) __net_timestamp(skb); maybe_add_creds(skb, sock, other); skb_queue_tail(&other->sk_receive_queue, skb); if (max_level > unix_sk(other)->recursion_level) unix_sk(other)->recursion_level = max_level; unix_state_unlock(other); other->sk_data_ready(other, len); sock_put(other); scm_destroy(siocb->scm); return len; out_unlock: unix_state_unlock(other); out_free: kfree_skb(skb); out: if (other) sock_put(other); scm_destroy(siocb->scm); return err; } Commit Message: af_netlink: force credentials passing [CVE-2012-3520] Pablo Neira Ayuso discovered that avahi and potentially NetworkManager accept spoofed Netlink messages because of a kernel bug. 
The kernel passes all-zero SCM_CREDENTIALS ancillary data to the receiver if the sender did not provide such data, instead of not including any such data at all or including the correct data from the peer (as it is the case with AF_UNIX). This bug was introduced in commit 16e572626961 (af_unix: dont send SCM_CREDENTIALS by default) This patch forces passing credentials for netlink, as before the regression. Another fix would be to not add SCM_CREDENTIALS in netlink messages if not provided by the sender, but it might break some programs. With help from Florian Weimer & Petr Matousek This issue is designated as CVE-2012-3520 Signed-off-by: Eric Dumazet <[email protected]> Cc: Petr Matousek <[email protected]> Cc: Florian Weimer <[email protected]> Cc: Pablo Neira Ayuso <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-287
static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr = msg->msg_name; struct sock *other = NULL; int namelen = 0; /* fake GCC */ int err; unsigned int hash; struct sk_buff *skb; long timeo; struct scm_cookie tmp_scm; int max_level; int data_len = 0; if (NULL == siocb->scm) siocb->scm = &tmp_scm; wait_for_unix_gc(); err = scm_send(sock, msg, siocb->scm, false); if (err < 0) return err; err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out; if (msg->msg_namelen) { err = unix_mkname(sunaddr, msg->msg_namelen, &hash); if (err < 0) goto out; namelen = err; } else { sunaddr = NULL; err = -ENOTCONN; other = unix_peer_get(sk); if (!other) goto out; } if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && (err = unix_autobind(sock)) != 0) goto out; err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; if (len > SKB_MAX_ALLOC) data_len = min_t(size_t, len - SKB_MAX_ALLOC, MAX_SKB_FRAGS * PAGE_SIZE); skb = sock_alloc_send_pskb(sk, len - data_len, data_len, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; err = unix_scm_to_skb(siocb->scm, skb, true); if (err < 0) goto out_free; max_level = err + 1; unix_get_secdata(siocb->scm, skb); skb_put(skb, len - data_len); skb->data_len = data_len; skb->len = len; err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len); if (err) goto out_free; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); restart: if (!other) { err = -ECONNRESET; if (sunaddr == NULL) goto out_free; other = unix_find_other(net, sunaddr, namelen, sk->sk_type, hash, &err); if (other == NULL) goto out_free; } if (sk_filter(other, skb) < 0) { /* Toss the packet but do not return any error to the sender */ err = len; goto out_free; } unix_state_lock(other); err = -EPERM; if (!unix_may_send(sk, other)) goto out_unlock; if (sock_flag(other, SOCK_DEAD)) { /* * Check with 1003.1g - what should * datagram error */ unix_state_unlock(other); sock_put(other); err = 0; unix_state_lock(sk); if (unix_peer(sk) == other) { unix_peer(sk) = NULL; unix_state_unlock(sk); unix_dgram_disconnected(sk, other); sock_put(other); err = -ECONNREFUSED; } else { unix_state_unlock(sk); } other = NULL; if (err) goto out_free; goto restart; } err = -EPIPE; if (other->sk_shutdown & RCV_SHUTDOWN) goto out_unlock; if (sk->sk_type != SOCK_SEQPACKET) { err = security_unix_may_send(sk->sk_socket, other->sk_socket); if (err) goto out_unlock; } if (unix_peer(other) != sk && unix_recvq_full(other)) { if (!timeo) { err = -EAGAIN; goto out_unlock; } timeo = unix_wait_for_peer(other, timeo); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out_free; goto restart; } if (sock_flag(other, SOCK_RCVTSTAMP)) __net_timestamp(skb); maybe_add_creds(skb, sock, other); skb_queue_tail(&other->sk_receive_queue, skb); if (max_level > unix_sk(other)->recursion_level) unix_sk(other)->recursion_level = max_level; unix_state_unlock(other); other->sk_data_ready(other, len); sock_put(other); scm_destroy(siocb->scm); return len; out_unlock: unix_state_unlock(other); out_free: kfree_skb(skb); out: if (other) sock_put(other); scm_destroy(siocb->scm); return err; }
165,579
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void BrowserRenderProcessHost::PropagateBrowserCommandLineToRenderer( const CommandLine& browser_cmd, CommandLine* renderer_cmd) const { static const char* const kSwitchNames[] = { switches::kChromeFrame, switches::kDisable3DAPIs, switches::kDisableAcceleratedCompositing, switches::kDisableApplicationCache, switches::kDisableAudio, switches::kDisableBreakpad, switches::kDisableDataTransferItems, switches::kDisableDatabases, switches::kDisableDesktopNotifications, switches::kDisableDeviceOrientation, switches::kDisableFileSystem, switches::kDisableGeolocation, switches::kDisableGLMultisampling, switches::kDisableGLSLTranslator, switches::kDisableGpuVsync, switches::kDisableIndexedDatabase, switches::kDisableJavaScriptI18NAPI, switches::kDisableLocalStorage, switches::kDisableLogging, switches::kDisableSeccompSandbox, switches::kDisableSessionStorage, switches::kDisableSharedWorkers, switches::kDisableSpeechInput, switches::kDisableWebAudio, switches::kDisableWebSockets, switches::kEnableAdaptive, switches::kEnableBenchmarking, switches::kEnableDCHECK, switches::kEnableGPUServiceLogging, switches::kEnableGPUClientLogging, switches::kEnableLogging, switches::kEnableMediaStream, switches::kEnablePepperTesting, #if defined(OS_MACOSX) switches::kEnableSandboxLogging, #endif switches::kEnableSeccompSandbox, switches::kEnableStatsTable, switches::kEnableVideoFullscreen, switches::kEnableVideoLogging, switches::kFullMemoryCrashReport, #if !defined (GOOGLE_CHROME_BUILD) switches::kInProcessPlugins, #endif // GOOGLE_CHROME_BUILD switches::kInProcessWebGL, switches::kJavaScriptFlags, switches::kLoggingLevel, switches::kLowLatencyAudio, switches::kNoJsRandomness, switches::kNoReferrers, switches::kNoSandbox, switches::kPlaybackMode, switches::kPpapiOutOfProcess, switches::kRecordMode, switches::kRegisterPepperPlugins, switches::kRendererAssertTest, #if !defined(OFFICIAL_BUILD) switches::kRendererCheckFalseTest, #endif // !defined(OFFICIAL_BUILD) switches::kRendererCrashTest, switches::kRendererStartupDialog, switches::kShowPaintRects, switches::kSimpleDataSource, switches::kTestSandbox, switches::kUseGL, switches::kUserAgent, switches::kV, switches::kVideoThreads, switches::kVModule, switches::kWebCoreLogChannels, }; renderer_cmd->CopySwitchesFrom(browser_cmd, kSwitchNames, arraysize(kSwitchNames)); if (profile()->IsOffTheRecord() && !browser_cmd.HasSwitch(switches::kDisableDatabases)) { renderer_cmd->AppendSwitch(switches::kDisableDatabases); } } Commit Message: DevTools: move DevToolsAgent/Client into content. BUG=84078 TEST= Review URL: http://codereview.chromium.org/7461019 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@93596 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-20
void BrowserRenderProcessHost::PropagateBrowserCommandLineToRenderer( const CommandLine& browser_cmd, CommandLine* renderer_cmd) const { static const char* const kSwitchNames[] = { switches::kChromeFrame, switches::kDisable3DAPIs, switches::kDisableAcceleratedCompositing, switches::kDisableApplicationCache, switches::kDisableAudio, switches::kDisableBreakpad, switches::kDisableDataTransferItems, switches::kDisableDatabases, switches::kDisableDesktopNotifications, switches::kDisableDeviceOrientation, switches::kDisableFileSystem, switches::kDisableGeolocation, switches::kDisableGLMultisampling, switches::kDisableGLSLTranslator, switches::kDisableGpuVsync, switches::kDisableIndexedDatabase, switches::kDisableJavaScriptI18NAPI, switches::kDisableLocalStorage, switches::kDisableLogging, switches::kDisableSeccompSandbox, switches::kDisableSessionStorage, switches::kDisableSharedWorkers, switches::kDisableSpeechInput, switches::kDisableWebAudio, switches::kDisableWebSockets, switches::kEnableAdaptive, switches::kEnableBenchmarking, switches::kEnableDCHECK, switches::kEnableGPUServiceLogging, switches::kEnableGPUClientLogging, switches::kEnableLogging, switches::kEnableMediaStream, switches::kEnablePepperTesting, #if defined(OS_MACOSX) switches::kEnableSandboxLogging, #endif switches::kEnableSeccompSandbox, switches::kEnableStatsTable, switches::kEnableVideoFullscreen, switches::kEnableVideoLogging, switches::kFullMemoryCrashReport, #if !defined (GOOGLE_CHROME_BUILD) switches::kInProcessPlugins, #endif // GOOGLE_CHROME_BUILD switches::kInProcessWebGL, switches::kJavaScriptFlags, switches::kLoggingLevel, switches::kLowLatencyAudio, switches::kNoJsRandomness, switches::kNoReferrers, switches::kNoSandbox, switches::kPlaybackMode, switches::kPpapiOutOfProcess, switches::kRecordMode, switches::kRegisterPepperPlugins, switches::kRemoteShellPort, switches::kRendererAssertTest, #if !defined(OFFICIAL_BUILD) switches::kRendererCheckFalseTest, #endif // !defined(OFFICIAL_BUILD) switches::kRendererCrashTest, switches::kRendererStartupDialog, switches::kShowPaintRects, switches::kSimpleDataSource, switches::kTestSandbox, switches::kUseGL, switches::kUserAgent, switches::kV, switches::kVideoThreads, switches::kVModule, switches::kWebCoreLogChannels, }; renderer_cmd->CopySwitchesFrom(browser_cmd, kSwitchNames, arraysize(kSwitchNames)); if (profile()->IsOffTheRecord() && !browser_cmd.HasSwitch(switches::kDisableDatabases)) { renderer_cmd->AppendSwitch(switches::kDisableDatabases); } }
170,325
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void WebGL2RenderingContextBase::texImage2D(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, GLintptr offset) { if (isContextLost()) return; if (!ValidateTexture2DBinding("texImage2D", target)) return; if (!bound_pixel_unpack_buffer_) { SynthesizeGLError(GL_INVALID_OPERATION, "texImage2D", "no bound PIXEL_UNPACK_BUFFER"); return; } if (!ValidateTexFunc("texImage2D", kTexImage, kSourceUnpackBuffer, target, level, internalformat, width, height, 1, border, format, type, 0, 0, 0)) return; if (!ValidateValueFitNonNegInt32("texImage2D", "offset", offset)) return; ContextGL()->TexImage2D( target, level, ConvertTexInternalFormat(internalformat, type), width, height, border, format, type, reinterpret_cast<const void*>(offset)); } Commit Message: Implement 2D texture uploading from client array with FLIP_Y or PREMULTIPLY_ALPHA. BUG=774174 TEST=https://github.com/KhronosGroup/WebGL/pull/2555 [email protected] Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel Change-Id: I4f4e7636314502451104730501a5048a5d7b9f3f Reviewed-on: https://chromium-review.googlesource.com/808665 Commit-Queue: Zhenyao Mo <[email protected]> Reviewed-by: Kenneth Russell <[email protected]> Cr-Commit-Position: refs/heads/master@{#522003} CWE ID: CWE-125
void WebGL2RenderingContextBase::texImage2D(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, GLintptr offset) { if (isContextLost()) return; if (!ValidateTexture2DBinding("texImage2D", target)) return; if (!bound_pixel_unpack_buffer_) { SynthesizeGLError(GL_INVALID_OPERATION, "texImage2D", "no bound PIXEL_UNPACK_BUFFER"); return; } if (unpack_flip_y_ || unpack_premultiply_alpha_) { SynthesizeGLError( GL_INVALID_OPERATION, "texImage2D", "FLIP_Y or PREMULTIPLY_ALPHA isn't allowed while uploading from PBO"); return; } if (!ValidateTexFunc("texImage2D", kTexImage, kSourceUnpackBuffer, target, level, internalformat, width, height, 1, border, format, type, 0, 0, 0)) return; if (!ValidateValueFitNonNegInt32("texImage2D", "offset", offset)) return; ContextGL()->TexImage2D( target, level, ConvertTexInternalFormat(internalformat, type), width, height, border, format, type, reinterpret_cast<const void*>(offset)); }
172,674
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void DesktopWindowTreeHostX11::SetVisible(bool visible) { if (compositor()) compositor()->SetVisible(visible); if (IsVisible() != visible) native_widget_delegate_->OnNativeWidgetVisibilityChanged(visible); } Commit Message: Fix PIP window being blank after minimize/show DesktopWindowTreeHostX11::SetVisible only made the call into OnNativeWidgetVisibilityChanged when transitioning from shown to minimized and not vice versa. This is because this change https://chromium-review.googlesource.com/c/chromium/src/+/1437263 considered IsVisible to be true when minimized, which made IsVisible always true in this case. This caused layers to be hidden but never shown again. This is a reland of: https://chromium-review.googlesource.com/c/chromium/src/+/1580103 Bug: 949199 Change-Id: I2151cd09e537d8ce8781897f43a3b8e9cec75996 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1584617 Reviewed-by: Scott Violet <[email protected]> Commit-Queue: enne <[email protected]> Cr-Commit-Position: refs/heads/master@{#654280} CWE ID: CWE-284
void DesktopWindowTreeHostX11::SetVisible(bool visible) { if (is_compositor_set_visible_ == visible) return; is_compositor_set_visible_ = visible; if (compositor()) compositor()->SetVisible(visible); native_widget_delegate_->OnNativeWidgetVisibilityChanged(visible); }
172,516
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int tls_decrypt_ticket(SSL *s, const unsigned char *etick, int eticklen, const unsigned char *sess_id, int sesslen, SSL_SESSION **psess) { SSL_SESSION *sess; unsigned char *sdec; const unsigned char *p; int slen, mlen, renew_ticket = 0, ret = -1; unsigned char tick_hmac[EVP_MAX_MD_SIZE]; HMAC_CTX *hctx = NULL; EVP_CIPHER_CTX *ctx; SSL_CTX *tctx = s->initial_ctx; /* Need at least keyname + iv + some encrypted data */ if (eticklen < 48) return 2; /* Initialize session ticket encryption and HMAC contexts */ hctx = HMAC_CTX_new(); if (hctx == NULL) hctx = HMAC_CTX_new(); if (hctx == NULL) return -2; ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) { ret = -2; goto err; } if (tctx->tlsext_ticket_key_cb) { unsigned char *nctick = (unsigned char *)etick; int rv = tctx->tlsext_ticket_key_cb(s, nctick, nctick + 16, ctx, hctx, 0); if (rv < 0) goto err; if (rv == 0) { ret = 2; goto err; } if (rv == 2) renew_ticket = 1; } else { /* Check key name matches */ if (memcmp(etick, tctx->tlsext_tick_key_name, sizeof(tctx->tlsext_tick_key_name)) != 0) { ret = 2; goto err; } if (HMAC_Init_ex(hctx, tctx->tlsext_tick_hmac_key, sizeof(tctx->tlsext_tick_hmac_key), EVP_sha256(), NULL) <= 0 || EVP_DecryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, tctx->tlsext_tick_aes_key, etick + sizeof(tctx->tlsext_tick_key_name)) <= 0) { goto err; } } /* * Attempt to process session ticket, first conduct sanity and integrity * checks on ticket. if (mlen < 0) { goto err; } eticklen -= mlen; /* Check HMAC of encrypted ticket */ if (HMAC_Update(hctx, etick, eticklen) <= 0 if (CRYPTO_memcmp(tick_hmac, etick + eticklen, mlen)) { EVP_CIPHER_CTX_free(ctx); return 2; } /* Attempt to decrypt session data */ /* Move p after IV to start of encrypted ticket, update length */ p = etick + 16 + EVP_CIPHER_CTX_iv_length(ctx); eticklen -= 16 + EVP_CIPHER_CTX_iv_length(ctx); sdec = OPENSSL_malloc(eticklen); if (sdec == NULL || EVP_DecryptUpdate(ctx, sdec, &slen, p, eticklen) <= 0) { EVP_CIPHER_CTX_free(ctx); OPENSSL_free(sdec); return -1; } if (EVP_DecryptFinal(ctx, sdec + slen, &mlen) <= 0) { EVP_CIPHER_CTX_free(ctx); OPENSSL_free(sdec); return 2; } slen += mlen; EVP_CIPHER_CTX_free(ctx); ctx = NULL; p = sdec; sess = d2i_SSL_SESSION(NULL, &p, slen); OPENSSL_free(sdec); if (sess) { /* * The session ID, if non-empty, is used by some clients to detect * that the ticket has been accepted. So we copy it to the session * structure. If it is empty set length to zero as required by * standard. */ if (sesslen) memcpy(sess->session_id, sess_id, sesslen); sess->session_id_length = sesslen; *psess = sess; if (renew_ticket) return 4; else return 3; } ERR_clear_error(); /* * For session parse failure, indicate that we need to send a new ticket. */ return 2; err: EVP_CIPHER_CTX_free(ctx); HMAC_CTX_free(hctx); return ret; } Commit Message: CWE ID: CWE-20
static int tls_decrypt_ticket(SSL *s, const unsigned char *etick, int eticklen, const unsigned char *sess_id, int sesslen, SSL_SESSION **psess) { SSL_SESSION *sess; unsigned char *sdec; const unsigned char *p; int slen, mlen, renew_ticket = 0, ret = -1; unsigned char tick_hmac[EVP_MAX_MD_SIZE]; HMAC_CTX *hctx = NULL; EVP_CIPHER_CTX *ctx; SSL_CTX *tctx = s->initial_ctx; /* Initialize session ticket encryption and HMAC contexts */ hctx = HMAC_CTX_new(); if (hctx == NULL) hctx = HMAC_CTX_new(); if (hctx == NULL) return -2; ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) { ret = -2; goto err; } if (tctx->tlsext_ticket_key_cb) { unsigned char *nctick = (unsigned char *)etick; int rv = tctx->tlsext_ticket_key_cb(s, nctick, nctick + 16, ctx, hctx, 0); if (rv < 0) goto err; if (rv == 0) { ret = 2; goto err; } if (rv == 2) renew_ticket = 1; } else { /* Check key name matches */ if (memcmp(etick, tctx->tlsext_tick_key_name, sizeof(tctx->tlsext_tick_key_name)) != 0) { ret = 2; goto err; } if (HMAC_Init_ex(hctx, tctx->tlsext_tick_hmac_key, sizeof(tctx->tlsext_tick_hmac_key), EVP_sha256(), NULL) <= 0 || EVP_DecryptInit_ex(ctx, EVP_aes_256_cbc(), NULL, tctx->tlsext_tick_aes_key, etick + sizeof(tctx->tlsext_tick_key_name)) <= 0) { goto err; } } /* * Attempt to process session ticket, first conduct sanity and integrity * checks on ticket. if (mlen < 0) { goto err; } /* Sanity check ticket length: must exceed keyname + IV + HMAC */ if (eticklen <= TLSEXT_KEYNAME_LENGTH + EVP_CIPHER_CTX_iv_length(ctx) + mlen) { ret = 2; goto err; } eticklen -= mlen; /* Check HMAC of encrypted ticket */ if (HMAC_Update(hctx, etick, eticklen) <= 0 if (CRYPTO_memcmp(tick_hmac, etick + eticklen, mlen)) { EVP_CIPHER_CTX_free(ctx); return 2; } /* Attempt to decrypt session data */ /* Move p after IV to start of encrypted ticket, update length */ p = etick + 16 + EVP_CIPHER_CTX_iv_length(ctx); eticklen -= 16 + EVP_CIPHER_CTX_iv_length(ctx); sdec = OPENSSL_malloc(eticklen); if (sdec == NULL || EVP_DecryptUpdate(ctx, sdec, &slen, p, eticklen) <= 0) { EVP_CIPHER_CTX_free(ctx); OPENSSL_free(sdec); return -1; } if (EVP_DecryptFinal(ctx, sdec + slen, &mlen) <= 0) { EVP_CIPHER_CTX_free(ctx); OPENSSL_free(sdec); return 2; } slen += mlen; EVP_CIPHER_CTX_free(ctx); ctx = NULL; p = sdec; sess = d2i_SSL_SESSION(NULL, &p, slen); OPENSSL_free(sdec); if (sess) { /* * The session ID, if non-empty, is used by some clients to detect * that the ticket has been accepted. So we copy it to the session * structure. If it is empty set length to zero as required by * standard. */ if (sesslen) memcpy(sess->session_id, sess_id, sesslen); sess->session_id_length = sesslen; *psess = sess; if (renew_ticket) return 4; else return 3; } ERR_clear_error(); /* * For session parse failure, indicate that we need to send a new ticket. */ return 2; err: EVP_CIPHER_CTX_free(ctx); HMAC_CTX_free(hctx); return ret; }
164,966
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void MakeCacheAndGroup(const GURL& manifest_url, int64_t group_id, int64_t cache_id, bool add_to_database) { AppCacheEntry default_entry(AppCacheEntry::EXPLICIT, cache_id + kDefaultEntryIdOffset, kDefaultEntrySize); group_ = new AppCacheGroup(storage(), manifest_url, group_id); cache_ = new AppCache(storage(), cache_id); cache_->AddEntry(kDefaultEntryUrl, default_entry); cache_->set_complete(true); group_->AddCache(cache_.get()); url::Origin manifest_origin(url::Origin::Create(manifest_url)); if (add_to_database) { AppCacheDatabase::GroupRecord group_record; group_record.group_id = group_id; group_record.manifest_url = manifest_url; group_record.origin = manifest_origin; EXPECT_TRUE(database()->InsertGroup(&group_record)); AppCacheDatabase::CacheRecord cache_record; cache_record.cache_id = cache_id; cache_record.group_id = group_id; cache_record.online_wildcard = false; cache_record.update_time = kZeroTime; cache_record.cache_size = kDefaultEntrySize; EXPECT_TRUE(database()->InsertCache(&cache_record)); AppCacheDatabase::EntryRecord entry_record; entry_record.cache_id = cache_id; entry_record.url = kDefaultEntryUrl; entry_record.flags = default_entry.types(); entry_record.response_id = default_entry.response_id(); entry_record.response_size = default_entry.response_size(); EXPECT_TRUE(database()->InsertEntry(&entry_record)); storage()->usage_map_[manifest_origin] = default_entry.response_size(); } } Commit Message: Reland "AppCache: Add padding to cross-origin responses." This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7 Initialized CacheRecord::padding_size to 0. Original change's description: > AppCache: Add padding to cross-origin responses. > > Bug: 918293 > Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c > Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059 > Commit-Queue: Staphany Park <[email protected]> > Reviewed-by: Victor Costan <[email protected]> > Reviewed-by: Marijn Kruisselbrink <[email protected]> > Cr-Commit-Position: refs/heads/master@{#644624} Bug: 918293 Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906 Reviewed-by: Victor Costan <[email protected]> Commit-Queue: Staphany Park <[email protected]> Cr-Commit-Position: refs/heads/master@{#644719} CWE ID: CWE-200
void MakeCacheAndGroup(const GURL& manifest_url, int64_t group_id, int64_t cache_id, bool add_to_database) { AppCacheEntry default_entry(AppCacheEntry::EXPLICIT, cache_id + kDefaultEntryIdOffset, kDefaultEntrySize, kDefaultEntryPadding); group_ = new AppCacheGroup(storage(), manifest_url, group_id); cache_ = new AppCache(storage(), cache_id); cache_->AddEntry(kDefaultEntryUrl, default_entry); cache_->set_complete(true); group_->AddCache(cache_.get()); url::Origin manifest_origin(url::Origin::Create(manifest_url)); if (add_to_database) { AppCacheDatabase::GroupRecord group_record; group_record.group_id = group_id; group_record.manifest_url = manifest_url; group_record.origin = manifest_origin; EXPECT_TRUE(database()->InsertGroup(&group_record)); AppCacheDatabase::CacheRecord cache_record; cache_record.cache_id = cache_id; cache_record.group_id = group_id; cache_record.online_wildcard = false; cache_record.update_time = kZeroTime; cache_record.cache_size = kDefaultEntrySize; cache_record.padding_size = kDefaultEntryPadding; EXPECT_TRUE(database()->InsertCache(&cache_record)); AppCacheDatabase::EntryRecord entry_record; entry_record.cache_id = cache_id; entry_record.url = kDefaultEntryUrl; entry_record.flags = default_entry.types(); entry_record.response_id = default_entry.response_id(); entry_record.response_size = default_entry.response_size(); entry_record.padding_size = default_entry.padding_size(); EXPECT_TRUE(database()->InsertEntry(&entry_record)); storage()->usage_map_[manifest_origin] = default_entry.response_size() + default_entry.padding_size(); } }
172,986
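The AppCache record above charges cross-origin entries with an extra padding_size on top of response_size, so quota accounting no longer reveals the exact size of an opaque response. A standalone C sketch of that accounting idea follows; the names (cache_entry, origin_usage) are invented for illustration and this is not Chromium code.

#include <stdint.h>
#include <stdio.h>

/* Invented type: each cached entry carries its real size plus an opaque
 * padding chosen for cross-origin responses, so reported usage never
 * exposes the exact payload size of a resource the page cannot read. */
struct cache_entry {
    int64_t response_size;
    int64_t padding_size;    /* 0 for same-origin entries */
};

static int64_t origin_usage(const struct cache_entry *entries, size_t n)
{
    int64_t total = 0;
    for (size_t i = 0; i < n; i++)
        total += entries[i].response_size + entries[i].padding_size;
    return total;
}

int main(void)
{
    struct cache_entry entries[] = {
        { 4096, 0 },      /* same-origin: charged at its true size */
        { 1300, 7000 },   /* cross-origin: true size hidden behind padding */
    };
    printf("usage: %lld\n", (long long)origin_usage(entries, 2));
    return 0;
}

The essential point is that the padded total, not the raw response size, is what gets persisted and reported for cross-origin entries.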
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: rar_read_ahead(struct archive_read *a, size_t min, ssize_t *avail) { struct rar *rar = (struct rar *)(a->format->data); const void *h = __archive_read_ahead(a, min, avail); int ret; if (avail) { if (a->archive.read_data_is_posix_read && *avail > (ssize_t)a->archive.read_data_requested) *avail = a->archive.read_data_requested; if (*avail > rar->bytes_remaining) *avail = (ssize_t)rar->bytes_remaining; if (*avail < 0) return NULL; else if (*avail == 0 && rar->main_flags & MHD_VOLUME && rar->file_flags & FHD_SPLIT_AFTER) { ret = archive_read_format_rar_read_header(a, a->entry); if (ret == (ARCHIVE_EOF)) { rar->has_endarc_header = 1; ret = archive_read_format_rar_read_header(a, a->entry); } if (ret != (ARCHIVE_OK)) return NULL; return rar_read_ahead(a, min, avail); } } return h; } Commit Message: rar: file split across multi-part archives must match Fuzzing uncovered some UAF and memory overrun bugs where a file in a single file archive reported that it was split across multiple volumes. This was caused by ppmd7 operations calling rar_br_fillup. This would invoke rar_read_ahead, which would in some situations invoke archive_read_format_rar_read_header. That would check the new file name against the old file name, and if they didn't match up it would free the ppmd7 buffer and allocate a new one. However, because the ppmd7 decoder wasn't actually done with the buffer, it would continue to used the freed buffer. Both reads and writes to the freed region can be observed. This is quite tricky to solve: once the buffer has been freed it is too late, as the ppmd7 decoder functions almost universally assume success - there's no way for ppmd_read to signal error, nor are there good ways for functions like Range_Normalise to propagate them. So we can't detect after the fact that we're in an invalid state - e.g. by checking rar->cursor, we have to prevent ourselves from ever ending up there. So, when we are in the dangerous part or rar_read_ahead that assumes a valid split, we set a flag force read_header to either go down the path for split files or bail. This means that the ppmd7 decoder keeps a valid buffer and just runs out of data. Found with a combination of AFL, afl-rb and qsym. CWE ID: CWE-416
rar_read_ahead(struct archive_read *a, size_t min, ssize_t *avail) { struct rar *rar = (struct rar *)(a->format->data); const void *h = __archive_read_ahead(a, min, avail); int ret; if (avail) { if (a->archive.read_data_is_posix_read && *avail > (ssize_t)a->archive.read_data_requested) *avail = a->archive.read_data_requested; if (*avail > rar->bytes_remaining) *avail = (ssize_t)rar->bytes_remaining; if (*avail < 0) return NULL; else if (*avail == 0 && rar->main_flags & MHD_VOLUME && rar->file_flags & FHD_SPLIT_AFTER) { rar->filename_must_match = 1; ret = archive_read_format_rar_read_header(a, a->entry); if (ret == (ARCHIVE_EOF)) { rar->has_endarc_header = 1; ret = archive_read_format_rar_read_header(a, a->entry); } rar->filename_must_match = 0; if (ret != (ARCHIVE_OK)) return NULL; return rar_read_ahead(a, min, avail); } } return h; }
168,930
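The libarchive fix above works by setting a flag (filename_must_match) around the part of rar_read_ahead that assumes a valid volume split, so the header-reading path can no longer free the ppmd7 buffer while the decoder still uses it. A generic C sketch of that guard-flag pattern is below; stream, stream_refill and decode_run are invented names and this is not libarchive's code.

#include <stdlib.h>
#include <string.h>

struct stream {
    unsigned char *buf;        /* buffer the decoder is currently reading */
    size_t len;
    int switch_allowed;        /* guard flag: may a refill replace buf? */
};

/* A refill may normally free and reallocate the buffer, e.g. when the next
 * volume of a split archive begins. While a decoder still holds the old
 * buffer it must refuse, otherwise the caller keeps using freed memory. */
int stream_refill(struct stream *s, const unsigned char *next, size_t n)
{
    if (!s->switch_allowed)
        return -1;                       /* bail instead of freeing in-use buf */
    unsigned char *nb = realloc(s->buf, n);
    if (nb == NULL)
        return -1;
    memcpy(nb, next, n);
    s->buf = nb;
    s->len = n;
    return 0;
}

void decode_run(struct stream *s)
{
    s->switch_allowed = 0;               /* dangerous region: buffer must stay */
    /* ... ppmd-style decoding that may ask for more input ... */
    s->switch_allowed = 1;               /* safe to move to the next volume */
}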
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: OMX::buffer_id OMXNodeInstance::findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) { return (OMX::buffer_id)bufferHeader; } Commit Message: IOMX: Enable buffer ptr to buffer id translation for arm32 Bug: 20634516 Change-Id: Iac9eac3cb251eccd9bbad5df7421a07edc21da0c (cherry picked from commit 2d6b6601743c3c6960c6511a2cb774ef902759f4) CWE ID: CWE-119
OMX::buffer_id OMXNodeInstance::findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
173,358
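The IOMX record above (whose fixed output is truncated after the signature) is about replacing a raw pointer-to-integer cast with a real translation layer, so a 64-bit buffer header pointer never has to survive a round trip through a 32-bit id. A small C sketch of an id-to-pointer registry shows the general technique; register_buffer, lookup_buffer and unregister_buffer are invented names, not the AOSP implementation.

#include <stddef.h>

#define MAX_BUFFERS 64

/* Registry mapping small integer ids to pointers. Handing out ids instead
 * of casting pointers avoids truncation on 32-bit ABIs and lets the owner
 * validate every id it receives back before touching memory. */
void *g_buffers[MAX_BUFFERS];

int register_buffer(void *header)
{
    for (int id = 1; id < MAX_BUFFERS; id++) {   /* 0 is reserved as invalid */
        if (g_buffers[id] == NULL) {
            g_buffers[id] = header;
            return id;
        }
    }
    return 0;                                    /* table full */
}

void *lookup_buffer(int id)
{
    if (id <= 0 || id >= MAX_BUFFERS)
        return NULL;
    return g_buffers[id];
}

void unregister_buffer(int id)
{
    if (id > 0 && id < MAX_BUFFERS)
        g_buffers[id] = NULL;
}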
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static struct inode *ext4_alloc_inode(struct super_block *sb) { struct ext4_inode_info *ei; ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS); if (!ei) return NULL; ei->vfs_inode.i_version = 1; ei->vfs_inode.i_data.writeback_index = 0; memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache)); INIT_LIST_HEAD(&ei->i_prealloc_list); spin_lock_init(&ei->i_prealloc_lock); /* * Note: We can be called before EXT4_SB(sb)->s_journal is set, * therefore it can be null here. Don't check it, just initialize * jinode. */ jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode); ei->i_reserved_data_blocks = 0; ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; ei->i_da_metadata_calc_len = 0; ei->i_delalloc_reserved_flag = 0; spin_lock_init(&(ei->i_block_reservation_lock)); #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; #endif INIT_LIST_HEAD(&ei->i_completed_io_list); ei->cur_aio_dio = NULL; ei->i_sync_tid = 0; ei->i_datasync_tid = 0; return &ei->vfs_inode; } Commit Message: ext4: use ext4_get_block_write in buffer write Allocate uninitialized extent before ext4 buffer write and convert the extent to initialized after io completes. The purpose is to make sure an extent can only be marked initialized after it has been written with new data so we can safely drop the i_mutex lock in ext4 DIO read without exposing stale data. This helps to improve multi-thread DIO read performance on high-speed disks. Skip the nobh and data=journal mount cases to make things simple for now. Signed-off-by: Jiaying Zhang <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]> CWE ID:
static struct inode *ext4_alloc_inode(struct super_block *sb) { struct ext4_inode_info *ei; ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS); if (!ei) return NULL; ei->vfs_inode.i_version = 1; ei->vfs_inode.i_data.writeback_index = 0; memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache)); INIT_LIST_HEAD(&ei->i_prealloc_list); spin_lock_init(&ei->i_prealloc_lock); /* * Note: We can be called before EXT4_SB(sb)->s_journal is set, * therefore it can be null here. Don't check it, just initialize * jinode. */ jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode); ei->i_reserved_data_blocks = 0; ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; ei->i_da_metadata_calc_len = 0; ei->i_delalloc_reserved_flag = 0; spin_lock_init(&(ei->i_block_reservation_lock)); #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; #endif INIT_LIST_HEAD(&ei->i_completed_io_list); spin_lock_init(&ei->i_completed_io_lock); ei->cur_aio_dio = NULL; ei->i_sync_tid = 0; ei->i_datasync_tid = 0; return &ei->vfs_inode; }
167,554
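The ext4 commit above allocates extents in an uninitialized (unwritten) state and only marks them initialized once the write IO has completed, so a concurrent reader can never observe stale block contents; the visible diff also initializes the spinlock for the completed-IO list. A toy C sketch of that unwritten-then-initialized state machine follows; extent_alloc, write_completed and extent_read are invented names and this is not ext4 code.

#include <string.h>
#include <stddef.h>

enum extent_state { EXTENT_UNWRITTEN, EXTENT_INITIALIZED };

struct extent {
    enum extent_state state;
    char data[512];
};

/* Write path: space is reserved first and marked unwritten... */
void extent_alloc(struct extent *e)
{
    e->state = EXTENT_UNWRITTEN;
}

/* ...and only flipped to initialized once the data has actually landed. */
void write_completed(struct extent *e, const char *src, size_t n)
{
    if (n > sizeof(e->data))
        n = sizeof(e->data);
    memcpy(e->data, src, n);
    e->state = EXTENT_INITIALIZED;
}

/* Read path: an unwritten extent reads back as zeroes, never as whatever
 * happened to sit in the allocated blocks before. */
void extent_read(const struct extent *e, char *dst, size_t n)
{
    if (n > sizeof(e->data))
        n = sizeof(e->data);
    if (e->state == EXTENT_UNWRITTEN)
        memset(dst, 0, n);
    else
        memcpy(dst, e->data, n);
}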
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: OTRBrowserContextImpl::OTRBrowserContextImpl( BrowserContextImpl* original, BrowserContextIODataImpl* original_io_data) : BrowserContext(new OTRBrowserContextIODataImpl(original_io_data)), original_context_(original), weak_ptr_factory_(this) { BrowserContextDependencyManager::GetInstance() ->CreateBrowserContextServices(this); } Commit Message: CWE ID: CWE-20
OTRBrowserContextImpl::OTRBrowserContextImpl( BrowserContextImpl* original, BrowserContextIODataImpl* original_io_data) : BrowserContext(new OTRBrowserContextIODataImpl(original_io_data)), original_context_(original) { BrowserContextDependencyManager::GetInstance() ->CreateBrowserContextServices(this); }
165,416
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void kvmclock_reset(struct kvm_vcpu *vcpu) { if (vcpu->arch.time_page) { kvm_release_page_dirty(vcpu->arch.time_page); vcpu->arch.time_page = NULL; } } Commit Message: KVM: x86: Convert MSR_KVM_SYSTEM_TIME to use gfn_to_hva_cache functions (CVE-2013-1797) There is a potential use after free issue with the handling of MSR_KVM_SYSTEM_TIME. If the guest specifies a GPA in a movable or removable memory such as frame buffers then KVM might continue to write to that address even after it's removed via KVM_SET_USER_MEMORY_REGION. KVM pins the page in memory so it's unlikely to cause an issue, but if the user space component re-purposes the memory previously used for the guest, then the guest will be able to corrupt that memory. Tested: Tested against kvmclock unit test Signed-off-by: Andrew Honig <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]> CWE ID: CWE-399
static void kvmclock_reset(struct kvm_vcpu *vcpu) { vcpu->arch.pv_time_enabled = false; }
166,119
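The KVM record above gates kvmclock updates behind pv_time_enabled, and per the commit message the full fix re-resolves the guest address through a cache validated against the current memory slots rather than writing through a pinned page indefinitely. A generic C sketch of that gate-and-revalidate pattern is below; clock_state and resolve_guest_addr are hypothetical names and the snippet is not KVM code.

#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <stddef.h>

struct clock_state {
    bool     enabled;     /* cleared on reset, set when the guest registers */
    uint64_t guest_addr;  /* address the guest asked the time record to go */
};

/* Hypothetical helper: translate a guest address against the *current*
 * memory layout; returns NULL if the backing region has been removed. */
void *resolve_guest_addr(uint64_t guest_addr);

void clock_reset(struct clock_state *s)
{
    s->enabled = false;                    /* stop any further writes */
}

void clock_update(struct clock_state *s, const void *record, size_t n)
{
    if (!s->enabled)
        return;
    void *host = resolve_guest_addr(s->guest_addr);  /* revalidate each time */
    if (host == NULL) {                    /* backing memory went away */
        s->enabled = false;
        return;
    }
    memcpy(host, record, n);
}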
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int atusb_get_and_show_revision(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; unsigned char buffer[3]; int ret; /* Get a couple of the ATMega Firmware values */ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0, buffer, 3, 1000); if (ret >= 0) { atusb->fw_ver_maj = buffer[0]; atusb->fw_ver_min = buffer[1]; atusb->fw_hw_type = buffer[2]; dev_info(&usb_dev->dev, "Firmware: major: %u, minor: %u, hardware type: %u\n", atusb->fw_ver_maj, atusb->fw_ver_min, atusb->fw_hw_type); } if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 2) { dev_info(&usb_dev->dev, "Firmware version (%u.%u) predates our first public release.", atusb->fw_ver_maj, atusb->fw_ver_min); dev_info(&usb_dev->dev, "Please update to version 0.2 or newer"); } return ret; } Commit Message: ieee802154: atusb: do not use the stack for buffers to make them DMA able From 4.9 onwards we should really avoid using the stack here, as it will not be DMA-able on various platforms. This changes the buffers that were already present at the time of the 4.9 release. This should go into stable as well. Reported-by: Dan Carpenter <[email protected]> Cc: [email protected] Signed-off-by: Stefan Schmidt <[email protected]> Signed-off-by: Marcel Holtmann <[email protected]> CWE ID: CWE-119
static int atusb_get_and_show_revision(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; unsigned char *buffer; int ret; buffer = kmalloc(3, GFP_KERNEL); if (!buffer) return -ENOMEM; /* Get a couple of the ATMega Firmware values */ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0, buffer, 3, 1000); if (ret >= 0) { atusb->fw_ver_maj = buffer[0]; atusb->fw_ver_min = buffer[1]; atusb->fw_hw_type = buffer[2]; dev_info(&usb_dev->dev, "Firmware: major: %u, minor: %u, hardware type: %u\n", atusb->fw_ver_maj, atusb->fw_ver_min, atusb->fw_hw_type); } if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 2) { dev_info(&usb_dev->dev, "Firmware version (%u.%u) predates our first public release.", atusb->fw_ver_maj, atusb->fw_ver_min); dev_info(&usb_dev->dev, "Please update to version 0.2 or newer"); } kfree(buffer); return ret; }
168,391
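The atusb fix above swaps an on-stack array for a kmalloc'ed buffer because memory passed to the USB core must be DMA-able, which stack memory is not on several platforms. A kernel-style C sketch of the pattern (allocate on the heap, transfer, copy out, free on every path) follows; read_version and its parameters are invented and the snippet is illustrative, not the driver's actual code.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/usb.h>

/* Illustrative only: read three bytes from a vendor request into a heap
 * buffer. usb_control_msg() must never be handed stack memory, because
 * the USB core may DMA directly into the buffer. */
static int read_version(struct usb_device *udev, u8 request, u8 out[3])
{
    unsigned char *buf;
    int ret;

    buf = kmalloc(3, GFP_KERNEL);          /* DMA-able, unlike the stack */
    if (!buf)
        return -ENOMEM;

    ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                          USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
                          buf, 3, 1000);
    if (ret >= 0)
        memcpy(out, buf, 3);

    kfree(buf);                            /* freed on success and error */
    return ret;
}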
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type) { int error; if (type == ACL_TYPE_ACCESS) { error = posix_acl_equiv_mode(acl, &inode->i_mode); if (error < 0) return 0; if (error == 0) acl = NULL; } inode->i_ctime = current_time(inode); set_cached_acl(inode, type, acl); return 0; } Commit Message: tmpfs: clear S_ISGID when setting posix ACLs The tmpfs modification was missed in the CVE-2016-7097 fix, commit 073931017b49 ("posix_acl: Clear SGID bit when setting file permissions"). It can be tested with xfstests generic/375, which failed to clear the setgid bit in the following test case on tmpfs: touch $testfile chown 100:100 $testfile chmod 2755 $testfile _runas -u 100 -g 101 -- setfacl -m u::rwx,g::rwx,o::rwx $testfile Signed-off-by: Gu Zheng <[email protected]> Signed-off-by: Al Viro <[email protected]> CWE ID:
int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type) { int error; if (type == ACL_TYPE_ACCESS) { error = posix_acl_update_mode(inode, &inode->i_mode, &acl); if (error) return error; } inode->i_ctime = current_time(inode); set_cached_acl(inode, type, acl); return 0; }
168,386
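The tmpfs fix above switches to posix_acl_update_mode(), whose effect is to recompute the file mode from the new ACL and drop the setgid bit when the caller is neither in the owning group nor privileged. A small userspace-flavoured C sketch of that mode rule follows; caller_in_group, caller_privileged and mode_after_acl_change are invented names, not the kernel helper itself.

#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>

/* Hypothetical environment checks. */
bool caller_in_group(gid_t file_gid);
bool caller_privileged(void);

/* Recompute the mode implied by a new ACL and strip S_ISGID unless the
 * caller could legitimately have set it (member of the owning group, or
 * privileged). This mirrors the rule the commit message describes. */
mode_t mode_after_acl_change(mode_t mode_from_acl, gid_t file_gid)
{
    mode_t mode = mode_from_acl;

    if (!caller_in_group(file_gid) && !caller_privileged())
        mode &= ~(mode_t)S_ISGID;
    return mode;
}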
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (1 << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (1 << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } Commit Message: Fixed numerous integer overflow problems in the code for packet iterators in the JPC decoder. CWE ID: CWE-125
static int jpc_pi_nextrpcl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; uint_fast32_t trx0; uint_fast32_t try0; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->rlvlno = pchg->rlvlnostart; pi->rlvlno < pchg->rlvlnoend && pi->rlvlno < pi->maxrlvls; ++pi->rlvlno) { for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < JAS_CAST(int, pchg->compnoend) && pi->compno < pi->numcomps; ++pi->compno, ++pi->picomp) { if (pi->rlvlno >= pi->picomp->numrlvls) { continue; } pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (JAS_CAST(uint_fast32_t, 1) << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (JAS_CAST(uint_fast32_t, 1) << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; }
169,442
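The JasPer fix above bounds-checks the precinct exponents before computing steps like 1 << (prcwidthexpn + numrlvls - rlvlno - 1), so the shift count can never reach the width of the integer type. A self-contained C sketch of such a guard follows; checked_pow2 is an invented name, and the real code keeps extra headroom because the exponent is combined with other terms first.

#include <stdint.h>
#include <limits.h>
#include <stdio.h>

/* Return 1 << expn as a 32-bit value, or 0 to signal that expn is too
 * large: shifting by the type width or more is undefined behaviour. */
uint32_t checked_pow2(unsigned expn)
{
    if (expn >= sizeof(uint32_t) * CHAR_BIT)
        return 0;                           /* reject instead of overflowing */
    return (uint32_t)1 << expn;
}

int main(void)
{
    printf("%u\n", checked_pow2(12));       /* 4096 */
    printf("%u\n", checked_pow2(40));       /* 0: rejected */
    return 0;
}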
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Image *ReadJPEGImage(const ImageInfo *image_info, ExceptionInfo *exception) { char value[MaxTextExtent]; const char *option; ErrorManager error_manager; Image *image; IndexPacket index; JSAMPLE *volatile jpeg_pixels; JSAMPROW scanline[1]; MagickBooleanType debug, status; MagickSizeType number_pixels; MemoryInfo *memory_info; register ssize_t i; struct jpeg_decompress_struct jpeg_info; struct jpeg_error_mgr jpeg_error; register JSAMPLE *p; size_t units; ssize_t y; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); debug=IsEventLogging(); (void) debug; image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Verify that file size large enough to contain a JPEG datastream. */ if (GetBlobSize(image) < 107) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); /* Initialize JPEG parameters. */ (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager)); (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info)); (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error)); jpeg_info.err=jpeg_std_error(&jpeg_error); jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler; jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler; memory_info=(MemoryInfo *) NULL; error_manager.image=image; if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_decompress(&jpeg_info); if (error_manager.profile != (StringInfo *) NULL) error_manager.profile=DestroyStringInfo(error_manager.profile); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); InheritException(exception,&image->exception); return(DestroyImage(image)); } jpeg_info.client_data=(void *) &error_manager; jpeg_create_decompress(&jpeg_info); JPEGSourceManager(&jpeg_info,image); jpeg_set_marker_processor(&jpeg_info,JPEG_COM,ReadComment); option=GetImageOption(image_info,"profile:skip"); if (IsOptionMember("ICC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,ICC_MARKER,ReadICCProfile); if (IsOptionMember("IPTC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,IPTC_MARKER,ReadIPTCProfile); for (i=1; i < 16; i++) if ((i != 2) && (i != 13) && (i != 14)) if (IsOptionMember("APP",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,(int) (JPEG_APP0+i),ReadProfile); i=(ssize_t) jpeg_read_header(&jpeg_info,TRUE); if ((image_info->colorspace == YCbCrColorspace) || (image_info->colorspace == Rec601YCbCrColorspace) || (image_info->colorspace == Rec709YCbCrColorspace)) jpeg_info.out_color_space=JCS_YCbCr; /* Set image resolution. 
*/ units=0; if ((jpeg_info.saw_JFIF_marker != 0) && (jpeg_info.X_density != 1) && (jpeg_info.Y_density != 1)) { image->x_resolution=(double) jpeg_info.X_density; image->y_resolution=(double) jpeg_info.Y_density; units=(size_t) jpeg_info.density_unit; } if (units == 1) image->units=PixelsPerInchResolution; if (units == 2) image->units=PixelsPerCentimeterResolution; number_pixels=(MagickSizeType) image->columns*image->rows; option=GetImageOption(image_info,"jpeg:size"); if ((option != (const char *) NULL) && (jpeg_info.out_color_space != JCS_YCbCr)) { double scale_factor; GeometryInfo geometry_info; MagickStatusType flags; /* Scale the image. */ flags=ParseGeometry(option,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; jpeg_calc_output_dimensions(&jpeg_info); image->magick_columns=jpeg_info.output_width; image->magick_rows=jpeg_info.output_height; scale_factor=1.0; if (geometry_info.rho != 0.0) scale_factor=jpeg_info.output_width/geometry_info.rho; if ((geometry_info.sigma != 0.0) && (scale_factor > (jpeg_info.output_height/geometry_info.sigma))) scale_factor=jpeg_info.output_height/geometry_info.sigma; jpeg_info.scale_num=1U; jpeg_info.scale_denom=(unsigned int) scale_factor; jpeg_calc_output_dimensions(&jpeg_info); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Scale factor: %.20g",(double) scale_factor); } #if (JPEG_LIB_VERSION >= 61) && defined(D_PROGRESSIVE_SUPPORTED) #if defined(D_LOSSLESS_SUPPORTED) image->interlace=jpeg_info.process == JPROC_PROGRESSIVE ? JPEGInterlace : NoInterlace; image->compression=jpeg_info.process == JPROC_LOSSLESS ? LosslessJPEGCompression : JPEGCompression; if (jpeg_info.data_precision > 8) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "12-bit JPEG not supported. Reducing pixel data to 8 bits","`%s'", image->filename); if (jpeg_info.data_precision == 16) jpeg_info.data_precision=12; #else image->interlace=jpeg_info.progressive_mode != 0 ? JPEGInterlace : NoInterlace; image->compression=JPEGCompression; #endif #else image->compression=JPEGCompression; image->interlace=JPEGInterlace; #endif option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) { /* Let the JPEG library quantize for us. */ jpeg_info.quantize_colors=TRUE; jpeg_info.desired_number_of_colors=(int) StringToUnsignedLong(option); } option=GetImageOption(image_info,"jpeg:block-smoothing"); if (option != (const char *) NULL) jpeg_info.do_block_smoothing=IsStringTrue(option) != MagickFalse ? TRUE : FALSE; jpeg_info.dct_method=JDCT_FLOAT; option=GetImageOption(image_info,"jpeg:dct-method"); if (option != (const char *) NULL) switch (*option) { case 'D': case 'd': { if (LocaleCompare(option,"default") == 0) jpeg_info.dct_method=JDCT_DEFAULT; break; } case 'F': case 'f': { if (LocaleCompare(option,"fastest") == 0) jpeg_info.dct_method=JDCT_FASTEST; if (LocaleCompare(option,"float") == 0) jpeg_info.dct_method=JDCT_FLOAT; break; } case 'I': case 'i': { if (LocaleCompare(option,"ifast") == 0) jpeg_info.dct_method=JDCT_IFAST; if (LocaleCompare(option,"islow") == 0) jpeg_info.dct_method=JDCT_ISLOW; break; } } option=GetImageOption(image_info,"jpeg:fancy-upsampling"); if (option != (const char *) NULL) jpeg_info.do_fancy_upsampling=IsStringTrue(option) != MagickFalse ? 
TRUE : FALSE; (void) jpeg_start_decompress(&jpeg_info); image->columns=jpeg_info.output_width; image->rows=jpeg_info.output_height; image->depth=(size_t) jpeg_info.data_precision; switch (jpeg_info.out_color_space) { case JCS_RGB: default: { (void) SetImageColorspace(image,sRGBColorspace); break; } case JCS_GRAYSCALE: { (void) SetImageColorspace(image,GRAYColorspace); break; } case JCS_YCbCr: { (void) SetImageColorspace(image,YCbCrColorspace); break; } case JCS_CMYK: { (void) SetImageColorspace(image,CMYKColorspace); break; } } if (IsITUFaxImage(image) != MagickFalse) { (void) SetImageColorspace(image,LabColorspace); jpeg_info.out_color_space=JCS_YCbCr; } option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) if (AcquireImageColormap(image,StringToUnsignedLong(option)) == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((jpeg_info.output_components == 1) && (jpeg_info.quantize_colors == 0)) { size_t colors; colors=(size_t) GetQuantumRange(image->depth)+1; if (AcquireImageColormap(image,colors) == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } } if (image->debug != MagickFalse) { if (image->interlace != NoInterlace) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: progressive"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: nonprogressive"); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Data precision: %d", (int) jpeg_info.data_precision); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %dx%d", (int) jpeg_info.output_width,(int) jpeg_info.output_height); } JPEGSetImageQuality(&jpeg_info,image); JPEGSetImageSamplingFactor(&jpeg_info,image); (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double) jpeg_info.out_color_space); (void) SetImageProperty(image,"jpeg:colorspace",value); if (image_info->ping != MagickFalse) { jpeg_destroy_decompress(&jpeg_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { jpeg_destroy_decompress(&jpeg_info); InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((jpeg_info.output_components != 1) && (jpeg_info.output_components != 3) && (jpeg_info.output_components != 4)) { jpeg_destroy_decompress(&jpeg_info); ThrowReaderException(CorruptImageError,"ImageTypeNotSupported"); } memory_info=AcquireVirtualMemory((size_t) image->columns, jpeg_info.output_components*sizeof(*jpeg_pixels)); if (memory_info == (MemoryInfo *) NULL) { jpeg_destroy_decompress(&jpeg_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info); /* Convert JPEG pixels to pixel packets. 
*/ if (setjmp(error_manager.error_recovery) != 0) { if (memory_info != (MemoryInfo *) NULL) memory_info=RelinquishVirtualMemory(memory_info); jpeg_destroy_decompress(&jpeg_info); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); return(DestroyImage(image)); } if (jpeg_info.quantize_colors != 0) { image->colors=(size_t) jpeg_info.actual_number_of_colors; if (jpeg_info.out_color_space == JCS_GRAYSCALE) for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=image->colormap[i].red; image->colormap[i].blue=image->colormap[i].red; image->colormap[i].opacity=OpaqueOpacity; } else for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=ScaleCharToQuantum(jpeg_info.colormap[1][i]); image->colormap[i].blue=ScaleCharToQuantum(jpeg_info.colormap[2][i]); image->colormap[i].opacity=OpaqueOpacity; } } scanline[0]=(JSAMPROW) jpeg_pixels; for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (jpeg_read_scanlines(&jpeg_info,scanline,1) != 1) { (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename); continue; } p=jpeg_pixels; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); if (jpeg_info.data_precision > 8) { unsigned short scale; scale=65535/(unsigned short) GetQuantumRange((size_t) jpeg_info.data_precision); if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { size_t pixel; pixel=(size_t) (scale*GETJSAMPLE(*p)); index=ConstrainColormapIndex(image,pixel); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelGreen(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlue(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelMagenta(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelYellow(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlack(indexes+x,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } } else if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { index=ConstrainColormapIndex(image,(size_t) GETJSAMPLE(*p)); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { 
SetPixelCyan(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelMagenta(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelYellow(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlack(indexes+x,QuantumRange-ScaleCharToQuantum( (unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) { jpeg_abort_decompress(&jpeg_info); break; } } if (status != MagickFalse) { error_manager.finished=MagickTrue; if (setjmp(error_manager.error_recovery) == 0) (void) jpeg_finish_decompress(&jpeg_info); } /* Free jpeg resources. */ jpeg_destroy_decompress(&jpeg_info); memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } Commit Message: Zero pixel buffer CWE ID: CWE-200
static Image *ReadJPEGImage(const ImageInfo *image_info, ExceptionInfo *exception) { char value[MaxTextExtent]; const char *option; ErrorManager error_manager; Image *image; IndexPacket index; JSAMPLE *volatile jpeg_pixels; JSAMPROW scanline[1]; MagickBooleanType debug, status; MagickSizeType number_pixels; MemoryInfo *memory_info; register ssize_t i; struct jpeg_decompress_struct jpeg_info; struct jpeg_error_mgr jpeg_error; register JSAMPLE *p; size_t units; ssize_t y; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); debug=IsEventLogging(); (void) debug; image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Verify that file size large enough to contain a JPEG datastream. */ if (GetBlobSize(image) < 107) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); /* Initialize JPEG parameters. */ (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager)); (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info)); (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error)); jpeg_info.err=jpeg_std_error(&jpeg_error); jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler; jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler; memory_info=(MemoryInfo *) NULL; error_manager.image=image; if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_decompress(&jpeg_info); if (error_manager.profile != (StringInfo *) NULL) error_manager.profile=DestroyStringInfo(error_manager.profile); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); InheritException(exception,&image->exception); return(DestroyImage(image)); } jpeg_info.client_data=(void *) &error_manager; jpeg_create_decompress(&jpeg_info); JPEGSourceManager(&jpeg_info,image); jpeg_set_marker_processor(&jpeg_info,JPEG_COM,ReadComment); option=GetImageOption(image_info,"profile:skip"); if (IsOptionMember("ICC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,ICC_MARKER,ReadICCProfile); if (IsOptionMember("IPTC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,IPTC_MARKER,ReadIPTCProfile); for (i=1; i < 16; i++) if ((i != 2) && (i != 13) && (i != 14)) if (IsOptionMember("APP",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,(int) (JPEG_APP0+i),ReadProfile); i=(ssize_t) jpeg_read_header(&jpeg_info,TRUE); if ((image_info->colorspace == YCbCrColorspace) || (image_info->colorspace == Rec601YCbCrColorspace) || (image_info->colorspace == Rec709YCbCrColorspace)) jpeg_info.out_color_space=JCS_YCbCr; /* Set image resolution. 
*/ units=0; if ((jpeg_info.saw_JFIF_marker != 0) && (jpeg_info.X_density != 1) && (jpeg_info.Y_density != 1)) { image->x_resolution=(double) jpeg_info.X_density; image->y_resolution=(double) jpeg_info.Y_density; units=(size_t) jpeg_info.density_unit; } if (units == 1) image->units=PixelsPerInchResolution; if (units == 2) image->units=PixelsPerCentimeterResolution; number_pixels=(MagickSizeType) image->columns*image->rows; option=GetImageOption(image_info,"jpeg:size"); if ((option != (const char *) NULL) && (jpeg_info.out_color_space != JCS_YCbCr)) { double scale_factor; GeometryInfo geometry_info; MagickStatusType flags; /* Scale the image. */ flags=ParseGeometry(option,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; jpeg_calc_output_dimensions(&jpeg_info); image->magick_columns=jpeg_info.output_width; image->magick_rows=jpeg_info.output_height; scale_factor=1.0; if (geometry_info.rho != 0.0) scale_factor=jpeg_info.output_width/geometry_info.rho; if ((geometry_info.sigma != 0.0) && (scale_factor > (jpeg_info.output_height/geometry_info.sigma))) scale_factor=jpeg_info.output_height/geometry_info.sigma; jpeg_info.scale_num=1U; jpeg_info.scale_denom=(unsigned int) scale_factor; jpeg_calc_output_dimensions(&jpeg_info); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Scale factor: %.20g",(double) scale_factor); } #if (JPEG_LIB_VERSION >= 61) && defined(D_PROGRESSIVE_SUPPORTED) #if defined(D_LOSSLESS_SUPPORTED) image->interlace=jpeg_info.process == JPROC_PROGRESSIVE ? JPEGInterlace : NoInterlace; image->compression=jpeg_info.process == JPROC_LOSSLESS ? LosslessJPEGCompression : JPEGCompression; if (jpeg_info.data_precision > 8) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "12-bit JPEG not supported. Reducing pixel data to 8 bits","`%s'", image->filename); if (jpeg_info.data_precision == 16) jpeg_info.data_precision=12; #else image->interlace=jpeg_info.progressive_mode != 0 ? JPEGInterlace : NoInterlace; image->compression=JPEGCompression; #endif #else image->compression=JPEGCompression; image->interlace=JPEGInterlace; #endif option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) { /* Let the JPEG library quantize for us. */ jpeg_info.quantize_colors=TRUE; jpeg_info.desired_number_of_colors=(int) StringToUnsignedLong(option); } option=GetImageOption(image_info,"jpeg:block-smoothing"); if (option != (const char *) NULL) jpeg_info.do_block_smoothing=IsStringTrue(option) != MagickFalse ? TRUE : FALSE; jpeg_info.dct_method=JDCT_FLOAT; option=GetImageOption(image_info,"jpeg:dct-method"); if (option != (const char *) NULL) switch (*option) { case 'D': case 'd': { if (LocaleCompare(option,"default") == 0) jpeg_info.dct_method=JDCT_DEFAULT; break; } case 'F': case 'f': { if (LocaleCompare(option,"fastest") == 0) jpeg_info.dct_method=JDCT_FASTEST; if (LocaleCompare(option,"float") == 0) jpeg_info.dct_method=JDCT_FLOAT; break; } case 'I': case 'i': { if (LocaleCompare(option,"ifast") == 0) jpeg_info.dct_method=JDCT_IFAST; if (LocaleCompare(option,"islow") == 0) jpeg_info.dct_method=JDCT_ISLOW; break; } } option=GetImageOption(image_info,"jpeg:fancy-upsampling"); if (option != (const char *) NULL) jpeg_info.do_fancy_upsampling=IsStringTrue(option) != MagickFalse ? 
TRUE : FALSE; (void) jpeg_start_decompress(&jpeg_info); image->columns=jpeg_info.output_width; image->rows=jpeg_info.output_height; image->depth=(size_t) jpeg_info.data_precision; switch (jpeg_info.out_color_space) { case JCS_RGB: default: { (void) SetImageColorspace(image,sRGBColorspace); break; } case JCS_GRAYSCALE: { (void) SetImageColorspace(image,GRAYColorspace); break; } case JCS_YCbCr: { (void) SetImageColorspace(image,YCbCrColorspace); break; } case JCS_CMYK: { (void) SetImageColorspace(image,CMYKColorspace); break; } } if (IsITUFaxImage(image) != MagickFalse) { (void) SetImageColorspace(image,LabColorspace); jpeg_info.out_color_space=JCS_YCbCr; } option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) if (AcquireImageColormap(image,StringToUnsignedLong(option)) == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((jpeg_info.output_components == 1) && (jpeg_info.quantize_colors == 0)) { size_t colors; colors=(size_t) GetQuantumRange(image->depth)+1; if (AcquireImageColormap(image,colors) == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } } if (image->debug != MagickFalse) { if (image->interlace != NoInterlace) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: progressive"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: nonprogressive"); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Data precision: %d", (int) jpeg_info.data_precision); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %dx%d", (int) jpeg_info.output_width,(int) jpeg_info.output_height); } JPEGSetImageQuality(&jpeg_info,image); JPEGSetImageSamplingFactor(&jpeg_info,image); (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double) jpeg_info.out_color_space); (void) SetImageProperty(image,"jpeg:colorspace",value); if (image_info->ping != MagickFalse) { jpeg_destroy_decompress(&jpeg_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { jpeg_destroy_decompress(&jpeg_info); InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((jpeg_info.output_components != 1) && (jpeg_info.output_components != 3) && (jpeg_info.output_components != 4)) { jpeg_destroy_decompress(&jpeg_info); ThrowReaderException(CorruptImageError,"ImageTypeNotSupported"); } memory_info=AcquireVirtualMemory((size_t) image->columns, jpeg_info.output_components*sizeof(*jpeg_pixels)); if (memory_info == (MemoryInfo *) NULL) { jpeg_destroy_decompress(&jpeg_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info); (void) ResetMagickMemory(jpeg_pixels,0,image->columns* jpeg_info.output_components*sizeof(*jpeg_pixels)); /* Convert JPEG pixels to pixel packets. 
*/ if (setjmp(error_manager.error_recovery) != 0) { if (memory_info != (MemoryInfo *) NULL) memory_info=RelinquishVirtualMemory(memory_info); jpeg_destroy_decompress(&jpeg_info); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); return(DestroyImage(image)); } if (jpeg_info.quantize_colors != 0) { image->colors=(size_t) jpeg_info.actual_number_of_colors; if (jpeg_info.out_color_space == JCS_GRAYSCALE) for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=image->colormap[i].red; image->colormap[i].blue=image->colormap[i].red; image->colormap[i].opacity=OpaqueOpacity; } else for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=ScaleCharToQuantum(jpeg_info.colormap[1][i]); image->colormap[i].blue=ScaleCharToQuantum(jpeg_info.colormap[2][i]); image->colormap[i].opacity=OpaqueOpacity; } } scanline[0]=(JSAMPROW) jpeg_pixels; for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (jpeg_read_scanlines(&jpeg_info,scanline,1) != 1) { (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename); continue; } p=jpeg_pixels; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); if (jpeg_info.data_precision > 8) { unsigned short scale; scale=65535/(unsigned short) GetQuantumRange((size_t) jpeg_info.data_precision); if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { size_t pixel; pixel=(size_t) (scale*GETJSAMPLE(*p)); index=ConstrainColormapIndex(image,pixel); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelGreen(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlue(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelMagenta(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelYellow(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlack(indexes+x,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } } else if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { index=ConstrainColormapIndex(image,(size_t) GETJSAMPLE(*p)); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { 
SetPixelCyan(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelMagenta(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelYellow(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlack(indexes+x,QuantumRange-ScaleCharToQuantum( (unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) { jpeg_abort_decompress(&jpeg_info); break; } } if (status != MagickFalse) { error_manager.finished=MagickTrue; if (setjmp(error_manager.error_recovery) == 0) (void) jpeg_finish_decompress(&jpeg_info); } /* Free jpeg resources. */ jpeg_destroy_decompress(&jpeg_info); memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
168,036
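The ImageMagick change above zeroes the freshly acquired scanline buffer before decoding, so that when jpeg_read_scanlines() skips a corrupt row the pixels emitted for it come from zeroed memory instead of stale heap contents (the CWE-200 leak). A tiny standalone C sketch of the same idea follows; decode_row is an invented stand-in for the real decoder.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for a decoder that may bail out before filling the row. */
static int decode_row(unsigned char *row, size_t n, int corrupt)
{
    if (corrupt)
        return -1;             /* row left untouched on failure */
    memset(row, 0xAB, n);
    return 0;
}

int main(void)
{
    size_t n = 16;
    unsigned char *row = malloc(n);
    if (row == NULL)
        return 1;
    memset(row, 0, n);         /* zero first: a skipped row can only leak zeroes */

    if (decode_row(row, n, /*corrupt=*/1) != 0)
        fprintf(stderr, "row skipped, emitting zeroed pixels\n");
    for (size_t i = 0; i < n; i++)
        printf("%02x", row[i]);
    printf("\n");
    free(row);
    return 0;
}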
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: XSLStyleSheet::XSLStyleSheet(Node* parentNode, const String& originalURL, const KURL& finalURL, bool embedded) : m_ownerNode(parentNode) , m_originalURL(originalURL) , m_finalURL(finalURL) , m_isDisabled(false) , m_embedded(embedded) , m_processed(true) // The root sheet starts off processed. , m_stylesheetDoc(0) , m_stylesheetDocTaken(false) , m_parentStyleSheet(0) { } Commit Message: Avoid reparsing an XSLT stylesheet after the first failure. Certain libxslt versions appear to leave the doc in an invalid state when parsing fails. We should cache this result and avoid re-parsing. (The test cannot be converted to text-only due to its invalid stylesheet). [email protected],[email protected],[email protected] BUG=271939 Review URL: https://chromiumcodereview.appspot.com/23103007 git-svn-id: svn://svn.chromium.org/blink/trunk@156248 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-399
XSLStyleSheet::XSLStyleSheet(Node* parentNode, const String& originalURL, const KURL& finalURL, bool embedded) : m_ownerNode(parentNode) , m_originalURL(originalURL) , m_finalURL(finalURL) , m_isDisabled(false) , m_embedded(embedded) , m_processed(true) // The root sheet starts off processed. , m_stylesheetDoc(0) , m_stylesheetDocTaken(false) , m_compilationFailed(false) , m_parentStyleSheet(0) { }
171,184
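The Blink fix above records a sticky m_compilationFailed flag so a stylesheet that failed to compile once is never handed back to libxslt, which (per the commit message) can be left in an invalid state by a failed parse. A generic C sketch of caching the failure follows; stylesheet, compile_stylesheet and get_compiled are invented names.

#include <stdbool.h>
#include <stddef.h>

struct stylesheet {
    void *compiled;          /* result of a successful compile, if any */
    bool  compile_failed;    /* sticky: never retry a known-bad input */
};

/* Hypothetical compiler; returns NULL on failure. */
void *compile_stylesheet(const char *src);

void *get_compiled(struct stylesheet *s, const char *src)
{
    if (s->compile_failed)
        return NULL;                     /* don't reparse a failed sheet */
    if (s->compiled == NULL) {
        s->compiled = compile_stylesheet(src);
        if (s->compiled == NULL)
            s->compile_failed = true;
    }
    return s->compiled;
}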
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static inline void find_entity_for_char( unsigned int k, enum entity_charset charset, const entity_stage1_row *table, const unsigned char **entity, size_t *entity_len, unsigned char *old, size_t oldlen, size_t *cursor) { unsigned stage1_idx = ENT_STAGE1_INDEX(k); const entity_stage3_row *c; if (stage1_idx > 0x1D) { *entity = NULL; *entity_len = 0; return; } c = &table[stage1_idx][ENT_STAGE2_INDEX(k)][ENT_STAGE3_INDEX(k)]; if (!c->ambiguous) { *entity = (const unsigned char *)c->data.ent.entity; *entity_len = c->data.ent.entity_len; } else { /* peek at next char */ size_t cursor_before = *cursor; int status = SUCCESS; unsigned next_char; if (!(*cursor < oldlen)) goto no_suitable_2nd; next_char = get_next_char(charset, old, oldlen, cursor, &status); if (status == FAILURE) goto no_suitable_2nd; { const entity_multicodepoint_row *s, *e; s = &c->data.multicodepoint_table[1]; e = s - 1 + c->data.multicodepoint_table[0].leading_entry.size; /* we could do a binary search but it's not worth it since we have * at most two entries... */ for ( ; s <= e; s++) { if (s->normal_entry.second_cp == next_char) { *entity = s->normal_entry.entity; *entity_len = s->normal_entry.entity_len; return; } } } no_suitable_2nd: *cursor = cursor_before; *entity = (const unsigned char *) c->data.multicodepoint_table[0].leading_entry.default_entity; *entity_len = c->data.multicodepoint_table[0].leading_entry.default_entity_len; } } Commit Message: Fix bug #72135 - don't create strings with lengths outside int range CWE ID: CWE-190
static inline void find_entity_for_char( unsigned int k, enum entity_charset charset, const entity_stage1_row *table, const unsigned char **entity, size_t *entity_len, unsigned char *old, size_t oldlen, size_t *cursor) { unsigned stage1_idx = ENT_STAGE1_INDEX(k); const entity_stage3_row *c; if (stage1_idx > 0x1D) { *entity = NULL; *entity_len = 0; return; } c = &table[stage1_idx][ENT_STAGE2_INDEX(k)][ENT_STAGE3_INDEX(k)]; if (!c->ambiguous) { *entity = (const unsigned char *)c->data.ent.entity; *entity_len = c->data.ent.entity_len; } else { /* peek at next char */ size_t cursor_before = *cursor; int status = SUCCESS; unsigned next_char; if (!(*cursor < oldlen)) goto no_suitable_2nd; next_char = get_next_char(charset, old, oldlen, cursor, &status); if (status == FAILURE) goto no_suitable_2nd; { const entity_multicodepoint_row *s, *e; s = &c->data.multicodepoint_table[1]; e = s - 1 + c->data.multicodepoint_table[0].leading_entry.size; /* we could do a binary search but it's not worth it since we have * at most two entries... */ for ( ; s <= e; s++) { if (s->normal_entry.second_cp == next_char) { *entity = s->normal_entry.entity; *entity_len = s->normal_entry.entity_len; return; } } } no_suitable_2nd: *cursor = cursor_before; *entity = (const unsigned char *) c->data.multicodepoint_table[0].leading_entry.default_entity; *entity_len = c->data.multicodepoint_table[0].leading_entry.default_entity_len; } }
167,171
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: CopyKeyAliasesToKeymap(struct xkb_keymap *keymap, KeyNamesInfo *info) { AliasInfo *alias; unsigned i, num_key_aliases; struct xkb_key_alias *key_aliases; /* * Do some sanity checking on the aliases. We can't do it before * because keys and their aliases may be added out-of-order. */ num_key_aliases = 0; darray_foreach(alias, info->aliases) { /* Check that ->real is a key. */ if (!XkbKeyByName(keymap, alias->real, false)) { log_vrb(info->ctx, 5, "Attempt to alias %s to non-existent key %s; Ignored\n", KeyNameText(info->ctx, alias->alias), KeyNameText(info->ctx, alias->real)); alias->real = XKB_ATOM_NONE; continue; } /* Check that ->alias is not a key. */ if (XkbKeyByName(keymap, alias->alias, false)) { log_vrb(info->ctx, 5, "Attempt to create alias with the name of a real key; " "Alias \"%s = %s\" ignored\n", KeyNameText(info->ctx, alias->alias), KeyNameText(info->ctx, alias->real)); alias->real = XKB_ATOM_NONE; continue; } num_key_aliases++; } /* Copy key aliases. */ key_aliases = NULL; if (num_key_aliases > 0) { key_aliases = calloc(num_key_aliases, sizeof(*key_aliases)); if (!key_aliases) return false; } i = 0; darray_foreach(alias, info->aliases) { if (alias->real != XKB_ATOM_NONE) { key_aliases[i].alias = alias->alias; key_aliases[i].real = alias->real; i++; } } keymap->num_key_aliases = num_key_aliases; keymap->key_aliases = key_aliases; return true; } Commit Message: keycodes: don't try to copy zero key aliases Move the aliases copy to within the (num_key_aliases > 0) block. Passing info->aliases into this fuction with invalid aliases will cause log messages but num_key_aliases stays on 0. The key_aliases array is never allocated and remains NULL. We then loop through the aliases, causing a null-pointer dereference. Signed-off-by: Peter Hutterer <[email protected]> CWE ID: CWE-476
CopyKeyAliasesToKeymap(struct xkb_keymap *keymap, KeyNamesInfo *info) { AliasInfo *alias; unsigned i, num_key_aliases; struct xkb_key_alias *key_aliases; /* * Do some sanity checking on the aliases. We can't do it before * because keys and their aliases may be added out-of-order. */ num_key_aliases = 0; darray_foreach(alias, info->aliases) { /* Check that ->real is a key. */ if (!XkbKeyByName(keymap, alias->real, false)) { log_vrb(info->ctx, 5, "Attempt to alias %s to non-existent key %s; Ignored\n", KeyNameText(info->ctx, alias->alias), KeyNameText(info->ctx, alias->real)); alias->real = XKB_ATOM_NONE; continue; } /* Check that ->alias is not a key. */ if (XkbKeyByName(keymap, alias->alias, false)) { log_vrb(info->ctx, 5, "Attempt to create alias with the name of a real key; " "Alias \"%s = %s\" ignored\n", KeyNameText(info->ctx, alias->alias), KeyNameText(info->ctx, alias->real)); alias->real = XKB_ATOM_NONE; continue; } num_key_aliases++; } /* Copy key aliases. */ key_aliases = NULL; if (num_key_aliases > 0) { key_aliases = calloc(num_key_aliases, sizeof(*key_aliases)); if (!key_aliases) return false; i = 0; darray_foreach(alias, info->aliases) { if (alias->real != XKB_ATOM_NONE) { key_aliases[i].alias = alias->alias; key_aliases[i].real = alias->real; i++; } } } keymap->num_key_aliases = num_key_aliases; keymap->key_aliases = key_aliases; return true; }
169,092
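The xkbcommon fix above moves the copy loop inside the (num_key_aliases > 0) branch, i.e. after calloc has actually produced an array, so input whose aliases were all rejected can no longer drive a write through a NULL pointer. A compact C sketch of keeping the fill loop under the allocation check follows; copy_valid_aliases and struct alias are invented names.

#include <stdlib.h>
#include <stddef.h>

struct alias { int real; int name; };

/* Copy only the valid aliases (real != 0). The fill loop lives inside the
 * "allocation happened" branch, so when every alias was rejected we return
 * NULL with a count of zero instead of writing through a null pointer. */
struct alias *copy_valid_aliases(const struct alias *in, size_t n,
                                 size_t *out_n)
{
    size_t valid = 0;
    for (size_t i = 0; i < n; i++)
        if (in[i].real != 0)
            valid++;

    struct alias *out = NULL;
    if (valid > 0) {
        out = calloc(valid, sizeof(*out));
        if (out == NULL) {
            *out_n = 0;
            return NULL;
        }
        size_t j = 0;
        for (size_t i = 0; i < n; i++)
            if (in[i].real != 0)
                out[j++] = in[i];
    }
    *out_n = valid;
    return out;
}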
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: std::unique_ptr<WebContents> CreateWebContents() { std::unique_ptr<WebContents> web_contents = CreateTestWebContents(); content::WebContentsTester::For(web_contents.get()) ->NavigateAndCommit(GURL("https://www.example.com")); return web_contents; } Commit Message: Connect the LocalDB to TabManager. Bug: 773382 Change-Id: Iec8fe5226ee175105d51f300f30b4865478ac099 Reviewed-on: https://chromium-review.googlesource.com/1118611 Commit-Queue: Sébastien Marchand <[email protected]> Reviewed-by: François Doray <[email protected]> Cr-Commit-Position: refs/heads/master@{#572871} CWE ID:
std::unique_ptr<WebContents> CreateWebContents() { std::unique_ptr<WebContents> web_contents = CreateTestWebContents(); ResourceCoordinatorTabHelper::CreateForWebContents(web_contents.get()); content::WebContentsTester::For(web_contents.get()) ->NavigateAndCommit(GURL("https://www.example.com")); base::RepeatingClosure run_loop_cb = base::BindRepeating( &base::TestMockTimeTaskRunner::RunUntilIdle, task_runner_); testing::WaitForLocalDBEntryToBeInitialized(web_contents.get(), run_loop_cb); testing::ExpireLocalDBObservationWindows(web_contents.get()); return web_contents; }
172,230
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static EncodedJSValue JSC_HOST_CALL jsTestObjPrototypeFunctionOverloadedMethod6(ExecState* exec) { JSValue thisValue = exec->hostThisValue(); if (!thisValue.inherits(&JSTestObj::s_info)) return throwVMTypeError(exec); JSTestObj* castedThis = jsCast<JSTestObj*>(asObject(thisValue)); ASSERT_GC_OBJECT_INHERITS(castedThis, &JSTestObj::s_info); TestObj* impl = static_cast<TestObj*>(castedThis->impl()); if (exec->argumentCount() < 1) return throwVMError(exec, createTypeError(exec, "Not enough arguments")); DOMStringList* listArg(toDOMStringList(MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined))); if (exec->hadException()) return JSValue::encode(jsUndefined()); impl->overloadedMethod(listArg); return JSValue::encode(jsUndefined()); } Commit Message: [JSC] Implement a helper method createNotEnoughArgumentsError() https://bugs.webkit.org/show_bug.cgi?id=85102 Reviewed by Geoffrey Garen. In bug 84787, kbr@ requested to avoid hard-coding createTypeError(exec, "Not enough arguments") here and there. This patch implements createNotEnoughArgumentsError(exec) and uses it in JSC bindings. c.f. a corresponding bug for V8 bindings is bug 85097. Source/JavaScriptCore: * runtime/Error.cpp: (JSC::createNotEnoughArgumentsError): (JSC): * runtime/Error.h: (JSC): Source/WebCore: Test: bindings/scripts/test/TestObj.idl * bindings/scripts/CodeGeneratorJS.pm: Modified as described above. (GenerateArgumentsCountCheck): * bindings/js/JSDataViewCustom.cpp: Ditto. (WebCore::getDataViewMember): (WebCore::setDataViewMember): * bindings/js/JSDeprecatedPeerConnectionCustom.cpp: (WebCore::JSDeprecatedPeerConnectionConstructor::constructJSDeprecatedPeerConnection): * bindings/js/JSDirectoryEntryCustom.cpp: (WebCore::JSDirectoryEntry::getFile): (WebCore::JSDirectoryEntry::getDirectory): * bindings/js/JSSharedWorkerCustom.cpp: (WebCore::JSSharedWorkerConstructor::constructJSSharedWorker): * bindings/js/JSWebKitMutationObserverCustom.cpp: (WebCore::JSWebKitMutationObserverConstructor::constructJSWebKitMutationObserver): (WebCore::JSWebKitMutationObserver::observe): * bindings/js/JSWorkerCustom.cpp: (WebCore::JSWorkerConstructor::constructJSWorker): * bindings/scripts/test/JS/JSFloat64Array.cpp: Updated run-bindings-tests. 
(WebCore::jsFloat64ArrayPrototypeFunctionFoo): * bindings/scripts/test/JS/JSTestActiveDOMObject.cpp: (WebCore::jsTestActiveDOMObjectPrototypeFunctionExcitingFunction): (WebCore::jsTestActiveDOMObjectPrototypeFunctionPostMessage): * bindings/scripts/test/JS/JSTestCustomNamedGetter.cpp: (WebCore::jsTestCustomNamedGetterPrototypeFunctionAnotherFunction): * bindings/scripts/test/JS/JSTestEventTarget.cpp: (WebCore::jsTestEventTargetPrototypeFunctionItem): (WebCore::jsTestEventTargetPrototypeFunctionAddEventListener): (WebCore::jsTestEventTargetPrototypeFunctionRemoveEventListener): (WebCore::jsTestEventTargetPrototypeFunctionDispatchEvent): * bindings/scripts/test/JS/JSTestInterface.cpp: (WebCore::JSTestInterfaceConstructor::constructJSTestInterface): (WebCore::jsTestInterfacePrototypeFunctionSupplementalMethod2): * bindings/scripts/test/JS/JSTestMediaQueryListListener.cpp: (WebCore::jsTestMediaQueryListListenerPrototypeFunctionMethod): * bindings/scripts/test/JS/JSTestNamedConstructor.cpp: (WebCore::JSTestNamedConstructorNamedConstructor::constructJSTestNamedConstructor): * bindings/scripts/test/JS/JSTestObj.cpp: (WebCore::JSTestObjConstructor::constructJSTestObj): (WebCore::jsTestObjPrototypeFunctionVoidMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionIntMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionObjMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionMethodWithSequenceArg): (WebCore::jsTestObjPrototypeFunctionMethodReturningSequence): (WebCore::jsTestObjPrototypeFunctionMethodThatRequiresAllArgsAndThrows): (WebCore::jsTestObjPrototypeFunctionSerializedValue): (WebCore::jsTestObjPrototypeFunctionIdbKey): (WebCore::jsTestObjPrototypeFunctionOptionsObject): (WebCore::jsTestObjPrototypeFunctionAddEventListener): (WebCore::jsTestObjPrototypeFunctionRemoveEventListener): (WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndOptionalArg): (WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndTwoOptionalArgs): (WebCore::jsTestObjPrototypeFunctionMethodWithCallbackArg): (WebCore::jsTestObjPrototypeFunctionMethodWithNonCallbackArgAndCallbackArg): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod1): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod2): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod3): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod4): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod5): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod6): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod7): (WebCore::jsTestObjConstructorFunctionClassMethod2): (WebCore::jsTestObjConstructorFunctionOverloadedMethod11): (WebCore::jsTestObjConstructorFunctionOverloadedMethod12): (WebCore::jsTestObjPrototypeFunctionMethodWithUnsignedLongArray): (WebCore::jsTestObjPrototypeFunctionConvert1): (WebCore::jsTestObjPrototypeFunctionConvert2): (WebCore::jsTestObjPrototypeFunctionConvert3): (WebCore::jsTestObjPrototypeFunctionConvert4): (WebCore::jsTestObjPrototypeFunctionConvert5): (WebCore::jsTestObjPrototypeFunctionStrictFunction): * bindings/scripts/test/JS/JSTestSerializedScriptValueInterface.cpp: (WebCore::JSTestSerializedScriptValueInterfaceConstructor::constructJSTestSerializedScriptValueInterface): (WebCore::jsTestSerializedScriptValueInterfacePrototypeFunctionAcceptTransferList): git-svn-id: svn://svn.chromium.org/blink/trunk@115536 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-20
static EncodedJSValue JSC_HOST_CALL jsTestObjPrototypeFunctionOverloadedMethod6(ExecState* exec) { JSValue thisValue = exec->hostThisValue(); if (!thisValue.inherits(&JSTestObj::s_info)) return throwVMTypeError(exec); JSTestObj* castedThis = jsCast<JSTestObj*>(asObject(thisValue)); ASSERT_GC_OBJECT_INHERITS(castedThis, &JSTestObj::s_info); TestObj* impl = static_cast<TestObj*>(castedThis->impl()); if (exec->argumentCount() < 1) return throwVMError(exec, createNotEnoughArgumentsError(exec)); DOMStringList* listArg(toDOMStringList(MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined))); if (exec->hadException()) return JSValue::encode(jsUndefined()); impl->overloadedMethod(listArg); return JSValue::encode(jsUndefined()); }
170,605
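Note (illustrative, not part of the dataset row above): the commit message describes a shared helper, createNotEnoughArgumentsError(exec), added to JavaScriptCore's runtime/Error.cpp so the generated bindings stop hard-coding the type-error string at every call site. The sketch below shows what such a helper plausibly looks like based on that description; the exact upstream body may differ.

JSObject* createNotEnoughArgumentsError(ExecState* exec)
{
    // Wraps the message each generated binding previously hard-coded, so
    // every call site raises the same "Not enough arguments" TypeError.
    return createTypeError(exec, "Not enough arguments");
}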
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) { int mxsize, cmd_size, k; int input_size, blocking; unsigned char opcode; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; struct sg_header old_hdr; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_write: count=%d\n", (int) count)); if (atomic_read(&sdp->detaching)) return -ENODEV; if (!((filp->f_flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) return -ENXIO; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; /* protects following copy_from_user()s + get_user()s */ if (count < SZ_SG_HEADER) return -EIO; if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) return -EFAULT; blocking = !(filp->f_flags & O_NONBLOCK); if (old_hdr.reply_len < 0) return sg_new_write(sfp, filp, buf, count, blocking, 0, 0, NULL); if (count < (SZ_SG_HEADER + 6)) return -EIO; /* The minimum scsi command length is 6 bytes. */ if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp, "sg_write: queue full\n")); return -EDOM; } buf += SZ_SG_HEADER; __get_user(opcode, buf); if (sfp->next_cmd_len > 0) { cmd_size = sfp->next_cmd_len; sfp->next_cmd_len = 0; /* reset so only this write() effected */ } else { cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */ if ((opcode >= 0xc0) && old_hdr.twelve_byte) cmd_size = 12; } SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); /* Determine buffer size. */ input_size = count - cmd_size; mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len; mxsize -= SZ_SG_HEADER; input_size -= SZ_SG_HEADER; if (input_size < 0) { sg_remove_request(sfp, srp); return -EIO; /* User did not pass enough bytes for this command. */ } hp = &srp->header; hp->interface_id = '\0'; /* indicator of old interface tunnelled */ hp->cmd_len = (unsigned char) cmd_size; hp->iovec_count = 0; hp->mx_sb_len = 0; if (input_size > 0) hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ? SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV; else hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; hp->dxfer_len = mxsize; if ((hp->dxfer_direction == SG_DXFER_TO_DEV) || (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)) hp->dxferp = (char __user *)buf + cmd_size; else hp->dxferp = NULL; hp->sbp = NULL; hp->timeout = old_hdr.reply_len; /* structure abuse ... */ hp->flags = input_size; /* structure abuse ... */ hp->pack_id = old_hdr.pack_id; hp->usr_ptr = NULL; if (__copy_from_user(cmnd, buf, cmd_size)) return -EFAULT; /* * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, * but is is possible that the app intended SG_DXFER_TO_DEV, because there * is a non-zero input_size, so emit a warning. */ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { static char cmd[TASK_COMM_LEN]; if (strcmp(current->comm, cmd)) { printk_ratelimited(KERN_WARNING "sg_write: data in/out %d/%d bytes " "for SCSI command 0x%x-- guessing " "data in;\n program %s not setting " "count and/or reply_len properly\n", old_hdr.reply_len - (int)SZ_SG_HEADER, input_size, (unsigned int) cmnd[0], current->comm); strcpy(cmd, current->comm); } } k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); return (k < 0) ? 
k : count; } Commit Message: sg_write()/bsg_write() is not fit to be called under KERNEL_DS Both damn things interpret userland pointers embedded into the payload; worse, they are actually traversing those. Leaving aside the bad API design, this is very much _not_ safe to call with KERNEL_DS. Bail out early if that happens. Cc: [email protected] Signed-off-by: Al Viro <[email protected]> CWE ID: CWE-416
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) { int mxsize, cmd_size, k; int input_size, blocking; unsigned char opcode; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; struct sg_header old_hdr; sg_io_hdr_t *hp; unsigned char cmnd[SG_MAX_CDB_SIZE]; if (unlikely(segment_eq(get_fs(), KERNEL_DS))) return -EINVAL; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_write: count=%d\n", (int) count)); if (atomic_read(&sdp->detaching)) return -ENODEV; if (!((filp->f_flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) return -ENXIO; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; /* protects following copy_from_user()s + get_user()s */ if (count < SZ_SG_HEADER) return -EIO; if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) return -EFAULT; blocking = !(filp->f_flags & O_NONBLOCK); if (old_hdr.reply_len < 0) return sg_new_write(sfp, filp, buf, count, blocking, 0, 0, NULL); if (count < (SZ_SG_HEADER + 6)) return -EIO; /* The minimum scsi command length is 6 bytes. */ if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp, "sg_write: queue full\n")); return -EDOM; } buf += SZ_SG_HEADER; __get_user(opcode, buf); if (sfp->next_cmd_len > 0) { cmd_size = sfp->next_cmd_len; sfp->next_cmd_len = 0; /* reset so only this write() effected */ } else { cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */ if ((opcode >= 0xc0) && old_hdr.twelve_byte) cmd_size = 12; } SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); /* Determine buffer size. */ input_size = count - cmd_size; mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len; mxsize -= SZ_SG_HEADER; input_size -= SZ_SG_HEADER; if (input_size < 0) { sg_remove_request(sfp, srp); return -EIO; /* User did not pass enough bytes for this command. */ } hp = &srp->header; hp->interface_id = '\0'; /* indicator of old interface tunnelled */ hp->cmd_len = (unsigned char) cmd_size; hp->iovec_count = 0; hp->mx_sb_len = 0; if (input_size > 0) hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ? SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV; else hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; hp->dxfer_len = mxsize; if ((hp->dxfer_direction == SG_DXFER_TO_DEV) || (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)) hp->dxferp = (char __user *)buf + cmd_size; else hp->dxferp = NULL; hp->sbp = NULL; hp->timeout = old_hdr.reply_len; /* structure abuse ... */ hp->flags = input_size; /* structure abuse ... */ hp->pack_id = old_hdr.pack_id; hp->usr_ptr = NULL; if (__copy_from_user(cmnd, buf, cmd_size)) return -EFAULT; /* * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, * but is is possible that the app intended SG_DXFER_TO_DEV, because there * is a non-zero input_size, so emit a warning. */ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { static char cmd[TASK_COMM_LEN]; if (strcmp(current->comm, cmd)) { printk_ratelimited(KERN_WARNING "sg_write: data in/out %d/%d bytes " "for SCSI command 0x%x-- guessing " "data in;\n program %s not setting " "count and/or reply_len properly\n", old_hdr.reply_len - (int)SZ_SG_HEADER, input_size, (unsigned int) cmnd[0], current->comm); strcpy(cmd, current->comm); } } k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); return (k < 0) ? k : count; }
166,841
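Note (sketch, not part of the dataset row above): the commit message covers bsg_write() as well as sg_write(), but only the sg_write() change appears in this row. The analogous guard in bsg_write() would bail out before any of the user-space pointers embedded in the payload are traversed; the fragment below reuses the same test the sg_write() fix adds and is not the verbatim upstream bsg.c change.

	/* Reject writes issued under KERNEL_DS before touching any of the
	 * user-space pointers embedded in the request payload. */
	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		return -EINVAL;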
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int php_handler(request_rec *r) { php_struct * volatile ctx; void *conf; apr_bucket_brigade * volatile brigade; apr_bucket *bucket; apr_status_t rv; request_rec * volatile parent_req = NULL; TSRMLS_FETCH(); #define PHPAP_INI_OFF php_apache_ini_dtor(r, parent_req TSRMLS_CC); conf = ap_get_module_config(r->per_dir_config, &php5_module); /* apply_config() needs r in some cases, so allocate server_context early */ ctx = SG(server_context); if (ctx == NULL || (ctx && ctx->request_processed && !strcmp(r->protocol, "INCLUDED"))) { normal: ctx = SG(server_context) = apr_pcalloc(r->pool, sizeof(*ctx)); /* register a cleanup so we clear out the SG(server_context) * after each request. Note: We pass in the pointer to the * server_context in case this is handled by a different thread. */ apr_pool_cleanup_register(r->pool, (void *)&SG(server_context), php_server_context_cleanup, apr_pool_cleanup_null); ctx->r = r; ctx = NULL; /* May look weird to null it here, but it is to catch the right case in the first_try later on */ } else { parent_req = ctx->r; ctx->r = r; } apply_config(conf); if (strcmp(r->handler, PHP_MAGIC_TYPE) && strcmp(r->handler, PHP_SOURCE_MAGIC_TYPE) && strcmp(r->handler, PHP_SCRIPT)) { /* Check for xbithack in this case. */ if (!AP2(xbithack) || strcmp(r->handler, "text/html") || !(r->finfo.protection & APR_UEXECUTE)) { PHPAP_INI_OFF; return DECLINED; } } /* Give a 404 if PATH_INFO is used but is explicitly disabled in * the configuration; default behaviour is to accept. */ if (r->used_path_info == AP_REQ_REJECT_PATH_INFO && r->path_info && r->path_info[0]) { PHPAP_INI_OFF; return HTTP_NOT_FOUND; } /* handle situations where user turns the engine off */ if (!AP2(engine)) { PHPAP_INI_OFF; return DECLINED; } if (r->finfo.filetype == 0) { php_apache_sapi_log_message_ex("script '%s' not found or unable to stat", r TSRMLS_CC); PHPAP_INI_OFF; return HTTP_NOT_FOUND; } if (r->finfo.filetype == APR_DIR) { php_apache_sapi_log_message_ex("attempt to invoke directory '%s' as script", r TSRMLS_CC); PHPAP_INI_OFF; return HTTP_FORBIDDEN; } /* Setup the CGI variables if this is the main request */ if (r->main == NULL || /* .. or if the sub-request environment differs from the main-request. */ r->subprocess_env != r->main->subprocess_env ) { /* setup standard CGI variables */ ap_add_common_vars(r); ap_add_cgi_vars(r); } zend_first_try { if (ctx == NULL) { brigade = apr_brigade_create(r->pool, r->connection->bucket_alloc); ctx = SG(server_context); ctx->brigade = brigade; if (php_apache_request_ctor(r, ctx TSRMLS_CC)!=SUCCESS) { zend_bailout(); } } else { if (!parent_req) { parent_req = ctx->r; } if (parent_req && parent_req->handler && strcmp(parent_req->handler, PHP_MAGIC_TYPE) && strcmp(parent_req->handler, PHP_SOURCE_MAGIC_TYPE) && strcmp(parent_req->handler, PHP_SCRIPT)) { if (php_apache_request_ctor(r, ctx TSRMLS_CC)!=SUCCESS) { zend_bailout(); } } /* * check if coming due to ErrorDocument * We make a special exception of 413 (Invalid POST request) as the invalidity of the request occurs * during processing of the request by PHP during POST processing. Therefor we need to re-use the exiting * PHP instance to handle the request rather then creating a new one. 
*/ if (parent_req && parent_req->status != HTTP_OK && parent_req->status != 413 && strcmp(r->protocol, "INCLUDED")) { parent_req = NULL; goto normal; } ctx->r = r; brigade = ctx->brigade; } if (AP2(last_modified)) { ap_update_mtime(r, r->finfo.mtime); ap_set_last_modified(r); } /* Determine if we need to parse the file or show the source */ if (strncmp(r->handler, PHP_SOURCE_MAGIC_TYPE, sizeof(PHP_SOURCE_MAGIC_TYPE) - 1) == 0) { zend_syntax_highlighter_ini syntax_highlighter_ini; php_get_highlight_struct(&syntax_highlighter_ini); highlight_file((char *)r->filename, &syntax_highlighter_ini TSRMLS_CC); } else { zend_file_handle zfd; zfd.type = ZEND_HANDLE_FILENAME; zfd.filename = (char *) r->filename; zfd.free_filename = 0; zfd.opened_path = NULL; if (!parent_req) { php_execute_script(&zfd TSRMLS_CC); } else { zend_execute_scripts(ZEND_INCLUDE TSRMLS_CC, NULL, 1, &zfd); } apr_table_set(r->notes, "mod_php_memory_usage", apr_psprintf(ctx->r->pool, "%" APR_SIZE_T_FMT, zend_memory_peak_usage(1 TSRMLS_CC))); } } zend_end_try(); if (!parent_req) { php_apache_request_dtor(r TSRMLS_CC); ctx->request_processed = 1; bucket = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(brigade, bucket); rv = ap_pass_brigade(r->output_filters, brigade); if (rv != APR_SUCCESS || r->connection->aborted) { zend_first_try { php_handle_aborted_connection(); } zend_end_try(); } apr_brigade_cleanup(brigade); apr_pool_cleanup_run(r->pool, (void *)&SG(server_context), php_server_context_cleanup); } else { ctx->r = parent_req; } return OK; } Commit Message: Fix for bug #76582 The brigade seems to end up in a messed up state if something fails in shutdown, so we clean it up. CWE ID: CWE-79
static int php_handler(request_rec *r) { php_struct * volatile ctx; void *conf; apr_bucket_brigade * volatile brigade; apr_bucket *bucket; apr_status_t rv; request_rec * volatile parent_req = NULL; TSRMLS_FETCH(); #define PHPAP_INI_OFF php_apache_ini_dtor(r, parent_req TSRMLS_CC); conf = ap_get_module_config(r->per_dir_config, &php5_module); /* apply_config() needs r in some cases, so allocate server_context early */ ctx = SG(server_context); if (ctx == NULL || (ctx && ctx->request_processed && !strcmp(r->protocol, "INCLUDED"))) { normal: ctx = SG(server_context) = apr_pcalloc(r->pool, sizeof(*ctx)); /* register a cleanup so we clear out the SG(server_context) * after each request. Note: We pass in the pointer to the * server_context in case this is handled by a different thread. */ apr_pool_cleanup_register(r->pool, (void *)&SG(server_context), php_server_context_cleanup, apr_pool_cleanup_null); ctx->r = r; ctx = NULL; /* May look weird to null it here, but it is to catch the right case in the first_try later on */ } else { parent_req = ctx->r; ctx->r = r; } apply_config(conf); if (strcmp(r->handler, PHP_MAGIC_TYPE) && strcmp(r->handler, PHP_SOURCE_MAGIC_TYPE) && strcmp(r->handler, PHP_SCRIPT)) { /* Check for xbithack in this case. */ if (!AP2(xbithack) || strcmp(r->handler, "text/html") || !(r->finfo.protection & APR_UEXECUTE)) { PHPAP_INI_OFF; return DECLINED; } } /* Give a 404 if PATH_INFO is used but is explicitly disabled in * the configuration; default behaviour is to accept. */ if (r->used_path_info == AP_REQ_REJECT_PATH_INFO && r->path_info && r->path_info[0]) { PHPAP_INI_OFF; return HTTP_NOT_FOUND; } /* handle situations where user turns the engine off */ if (!AP2(engine)) { PHPAP_INI_OFF; return DECLINED; } if (r->finfo.filetype == 0) { php_apache_sapi_log_message_ex("script '%s' not found or unable to stat", r TSRMLS_CC); PHPAP_INI_OFF; return HTTP_NOT_FOUND; } if (r->finfo.filetype == APR_DIR) { php_apache_sapi_log_message_ex("attempt to invoke directory '%s' as script", r TSRMLS_CC); PHPAP_INI_OFF; return HTTP_FORBIDDEN; } /* Setup the CGI variables if this is the main request */ if (r->main == NULL || /* .. or if the sub-request environment differs from the main-request. */ r->subprocess_env != r->main->subprocess_env ) { /* setup standard CGI variables */ ap_add_common_vars(r); ap_add_cgi_vars(r); } zend_first_try { if (ctx == NULL) { brigade = apr_brigade_create(r->pool, r->connection->bucket_alloc); ctx = SG(server_context); ctx->brigade = brigade; if (php_apache_request_ctor(r, ctx TSRMLS_CC)!=SUCCESS) { zend_bailout(); } } else { if (!parent_req) { parent_req = ctx->r; } if (parent_req && parent_req->handler && strcmp(parent_req->handler, PHP_MAGIC_TYPE) && strcmp(parent_req->handler, PHP_SOURCE_MAGIC_TYPE) && strcmp(parent_req->handler, PHP_SCRIPT)) { if (php_apache_request_ctor(r, ctx TSRMLS_CC)!=SUCCESS) { zend_bailout(); } } /* * check if coming due to ErrorDocument * We make a special exception of 413 (Invalid POST request) as the invalidity of the request occurs * during processing of the request by PHP during POST processing. Therefor we need to re-use the exiting * PHP instance to handle the request rather then creating a new one. 
*/ if (parent_req && parent_req->status != HTTP_OK && parent_req->status != 413 && strcmp(r->protocol, "INCLUDED")) { parent_req = NULL; goto normal; } ctx->r = r; brigade = ctx->brigade; } if (AP2(last_modified)) { ap_update_mtime(r, r->finfo.mtime); ap_set_last_modified(r); } /* Determine if we need to parse the file or show the source */ if (strncmp(r->handler, PHP_SOURCE_MAGIC_TYPE, sizeof(PHP_SOURCE_MAGIC_TYPE) - 1) == 0) { zend_syntax_highlighter_ini syntax_highlighter_ini; php_get_highlight_struct(&syntax_highlighter_ini); highlight_file((char *)r->filename, &syntax_highlighter_ini TSRMLS_CC); } else { zend_file_handle zfd; zfd.type = ZEND_HANDLE_FILENAME; zfd.filename = (char *) r->filename; zfd.free_filename = 0; zfd.opened_path = NULL; if (!parent_req) { php_execute_script(&zfd TSRMLS_CC); } else { zend_execute_scripts(ZEND_INCLUDE TSRMLS_CC, NULL, 1, &zfd); } apr_table_set(r->notes, "mod_php_memory_usage", apr_psprintf(ctx->r->pool, "%" APR_SIZE_T_FMT, zend_memory_peak_usage(1 TSRMLS_CC))); } } zend_end_try(); if (!parent_req) { php_apache_request_dtor(r TSRMLS_CC); ctx->request_processed = 1; apr_brigade_cleanup(brigade); bucket = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(brigade, bucket); rv = ap_pass_brigade(r->output_filters, brigade); if (rv != APR_SUCCESS || r->connection->aborted) { zend_first_try { php_handle_aborted_connection(); } zend_end_try(); } apr_brigade_cleanup(brigade); apr_pool_cleanup_run(r->pool, (void *)&SG(server_context), php_server_context_cleanup); } else { ctx->r = parent_req; } return OK; }
169,028
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int search_old_relocation(struct reloc_struct_t *reloc_table, ut32 addr_to_patch, int n_reloc) { int i; for (i = 0; i < n_reloc; i++) { if (addr_to_patch == reloc_table[i].data_offset) { return i; } } return -1; } Commit Message: Fix #6829 oob write because of using wrong struct CWE ID: CWE-119
static int search_old_relocation (struct reloc_struct_t *reloc_table, ut32 addr_to_patch, int n_reloc) { int i; for (i = 0; i < n_reloc; i++) { if (addr_to_patch == reloc_table[i].data_offset) { return i; } } return -1; }
168,365
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void MojoAudioOutputStream::OnStreamCreated( int stream_id, const base::SharedMemory* shared_memory, std::unique_ptr<base::CancelableSyncSocket> foreign_socket) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DCHECK(stream_created_callback_); DCHECK(shared_memory); DCHECK(foreign_socket); base::SharedMemoryHandle foreign_memory_handle = base::SharedMemory::DuplicateHandle(shared_memory->handle()); if (!base::SharedMemory::IsHandleValid(foreign_memory_handle)) { OnStreamError(/*not used*/ 0); return; } mojo::ScopedSharedBufferHandle buffer_handle = mojo::WrapSharedMemoryHandle( foreign_memory_handle, shared_memory->requested_size(), false); mojo::ScopedHandle socket_handle = mojo::WrapPlatformFile(foreign_socket->Release()); DCHECK(buffer_handle.is_valid()); DCHECK(socket_handle.is_valid()); base::ResetAndReturn(&stream_created_callback_) .Run(std::move(buffer_handle), std::move(socket_handle)); } Commit Message: Correct mojo::WrapSharedMemoryHandle usage Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which were assuming that the call actually has any control over the memory protection applied to a handle when mapped. Where fixing usage is infeasible for this CL, TODOs are added to annotate follow-up work. Also updates the API and documentation to (hopefully) improve clarity and avoid similar mistakes from being made in the future. BUG=792900 Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477 Reviewed-on: https://chromium-review.googlesource.com/818282 Reviewed-by: Wei Li <[email protected]> Reviewed-by: Lei Zhang <[email protected]> Reviewed-by: John Abd-El-Malek <[email protected]> Reviewed-by: Daniel Cheng <[email protected]> Reviewed-by: Sadrul Chowdhury <[email protected]> Reviewed-by: Yuzhu Shen <[email protected]> Reviewed-by: Robert Sesek <[email protected]> Commit-Queue: Ken Rockot <[email protected]> Cr-Commit-Position: refs/heads/master@{#530268} CWE ID: CWE-787
void MojoAudioOutputStream::OnStreamCreated( int stream_id, const base::SharedMemory* shared_memory, std::unique_ptr<base::CancelableSyncSocket> foreign_socket) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); DCHECK(stream_created_callback_); DCHECK(shared_memory); DCHECK(foreign_socket); base::SharedMemoryHandle foreign_memory_handle = base::SharedMemory::DuplicateHandle(shared_memory->handle()); if (!base::SharedMemory::IsHandleValid(foreign_memory_handle)) { OnStreamError(/*not used*/ 0); return; } mojo::ScopedSharedBufferHandle buffer_handle = mojo::WrapSharedMemoryHandle( foreign_memory_handle, shared_memory->requested_size(), mojo::UnwrappedSharedMemoryHandleProtection::kReadWrite); mojo::ScopedHandle socket_handle = mojo::WrapPlatformFile(foreign_socket->Release()); DCHECK(buffer_handle.is_valid()); DCHECK(socket_handle.is_valid()); base::ResetAndReturn(&stream_created_callback_) .Run(std::move(buffer_handle), std::move(socket_handle)); }
172,879
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: const SeekHead::VoidElement* SeekHead::GetVoidElement(int idx) const { if (idx < 0) return 0; if (idx >= m_void_element_count) return 0; return m_void_elements + idx; } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
const SeekHead::VoidElement* SeekHead::GetVoidElement(int idx) const
174,380
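Note (illustrative reconstruction, not the row's actual output, which is truncated to the signature above): since the commit message describes the upstream pull as a style cleanup with no breaking changes, the complete fixed accessor plausibly keeps the bounds checks of the vulnerable version unchanged.

const SeekHead::VoidElement* SeekHead::GetVoidElement(int idx) const {
  // Same bounds checks as the vulnerable version, reformatted to the
  // upstream style; the accessor itself was not behaviorally changed.
  if (idx < 0)
    return 0;

  if (idx >= m_void_element_count)
    return 0;

  return m_void_elements + idx;
}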
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int main(argc, argv) int argc; char *argv[]; { krb5_data pname_data, tkt_data; int sock = 0; socklen_t l; int retval; struct sockaddr_in l_inaddr, f_inaddr; /* local, foreign address */ krb5_creds creds, *new_creds; krb5_ccache cc; krb5_data msgtext, msg; krb5_context context; krb5_auth_context auth_context = NULL; #ifndef DEBUG freopen("/tmp/uu-server.log", "w", stderr); #endif retval = krb5_init_context(&context); if (retval) { com_err(argv[0], retval, "while initializing krb5"); exit(1); } #ifdef DEBUG { int one = 1; int acc; struct servent *sp; socklen_t namelen = sizeof(f_inaddr); if ((sock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { com_err("uu-server", errno, "creating socket"); exit(3); } l_inaddr.sin_family = AF_INET; l_inaddr.sin_addr.s_addr = 0; if (argc == 2) { l_inaddr.sin_port = htons(atoi(argv[1])); } else { if (!(sp = getservbyname("uu-sample", "tcp"))) { com_err("uu-server", 0, "can't find uu-sample/tcp service"); exit(3); } l_inaddr.sin_port = sp->s_port; } (void) setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&one, sizeof (one)); if (bind(sock, (struct sockaddr *)&l_inaddr, sizeof(l_inaddr))) { com_err("uu-server", errno, "binding socket"); exit(3); } if (listen(sock, 1) == -1) { com_err("uu-server", errno, "listening"); exit(3); } printf("Server started\n"); fflush(stdout); if ((acc = accept(sock, (struct sockaddr *)&f_inaddr, &namelen)) == -1) { com_err("uu-server", errno, "accepting"); exit(3); } dup2(acc, 0); close(sock); sock = 0; } #endif retval = krb5_read_message(context, (krb5_pointer) &sock, &pname_data); if (retval) { com_err ("uu-server", retval, "reading pname"); return 2; } retval = krb5_read_message(context, (krb5_pointer) &sock, &tkt_data); if (retval) { com_err ("uu-server", retval, "reading ticket data"); return 2; } retval = krb5_cc_default(context, &cc); if (retval) { com_err("uu-server", retval, "getting credentials cache"); return 4; } memset (&creds, 0, sizeof(creds)); retval = krb5_cc_get_principal(context, cc, &creds.client); if (retval) { com_err("uu-client", retval, "getting principal name"); return 6; } /* client sends it already null-terminated. */ printf ("uu-server: client principal is \"%s\".\n", pname_data.data); retval = krb5_parse_name(context, pname_data.data, &creds.server); if (retval) { com_err("uu-server", retval, "parsing client name"); return 3; } creds.second_ticket = tkt_data; printf ("uu-server: client ticket is %d bytes.\n", creds.second_ticket.length); retval = krb5_get_credentials(context, KRB5_GC_USER_USER, cc, &creds, &new_creds); if (retval) { com_err("uu-server", retval, "getting user-user ticket"); return 5; } #ifndef DEBUG l = sizeof(f_inaddr); if (getpeername(0, (struct sockaddr *)&f_inaddr, &l) == -1) { com_err("uu-server", errno, "getting client address"); return 6; } #endif l = sizeof(l_inaddr); if (getsockname(0, (struct sockaddr *)&l_inaddr, &l) == -1) { com_err("uu-server", errno, "getting local address"); return 6; } /* send a ticket/authenticator to the other side, so it can get the key we're using for the krb_safe below. 
*/ retval = krb5_auth_con_init(context, &auth_context); if (retval) { com_err("uu-server", retval, "making auth_context"); return 8; } retval = krb5_auth_con_setflags(context, auth_context, KRB5_AUTH_CONTEXT_DO_SEQUENCE); if (retval) { com_err("uu-server", retval, "initializing the auth_context flags"); return 8; } retval = krb5_auth_con_genaddrs(context, auth_context, sock, KRB5_AUTH_CONTEXT_GENERATE_LOCAL_FULL_ADDR | KRB5_AUTH_CONTEXT_GENERATE_REMOTE_FULL_ADDR); if (retval) { com_err("uu-server", retval, "generating addrs for auth_context"); return 9; } #if 1 retval = krb5_mk_req_extended(context, &auth_context, AP_OPTS_USE_SESSION_KEY, NULL, new_creds, &msg); if (retval) { com_err("uu-server", retval, "making AP_REQ"); return 8; } retval = krb5_write_message(context, (krb5_pointer) &sock, &msg); #else retval = krb5_sendauth(context, &auth_context, (krb5_pointer)&sock, "???", 0, 0, AP_OPTS_MUTUAL_REQUIRED | AP_OPTS_USE_SESSION_KEY, NULL, &creds, cc, NULL, NULL, NULL); #endif if (retval) goto cl_short_wrt; free(msg.data); msgtext.length = 32; msgtext.data = "Hello, other end of connection."; retval = krb5_mk_safe(context, auth_context, &msgtext, &msg, NULL); if (retval) { com_err("uu-server", retval, "encoding message to client"); return 6; } retval = krb5_write_message(context, (krb5_pointer) &sock, &msg); if (retval) { cl_short_wrt: com_err("uu-server", retval, "writing message to client"); return 7; } krb5_free_data_contents(context, &msg); krb5_free_data_contents(context, &pname_data); /* tkt_data freed with creds */ krb5_free_cred_contents(context, &creds); krb5_free_creds(context, new_creds); krb5_cc_close(context, cc); krb5_auth_con_free(context, auth_context); krb5_free_context(context); return 0; } Commit Message: Fix krb5_read_message handling [CVE-2014-5355] In recvauth_common, do not use strcmp against the data fields of krb5_data objects populated by krb5_read_message(), as there is no guarantee that they are C strings. Instead, create an expected krb5_data value and use data_eq(). In the sample user-to-user server application, check that the received client principal name is null-terminated before using it with printf and krb5_parse_name. CVE-2014-5355: In MIT krb5, when a server process uses the krb5_recvauth function, an unauthenticated remote attacker can cause a NULL dereference by sending a zero-byte version string, or a read beyond the end of allocated storage by sending a non-null-terminated version string. The example user-to-user server application (uuserver) is similarly vulnerable to a zero-length or non-null-terminated principal name string. The krb5_recvauth function reads two version strings from the client using krb5_read_message(), which produces a krb5_data structure containing a length and a pointer to an octet sequence. krb5_recvauth assumes that the data pointer is a valid C string and passes it to strcmp() to verify the versions. If the client sends an empty octet sequence, the data pointer will be NULL and strcmp() will dereference a NULL pointer, causing the process to crash. If the client sends a non-null-terminated octet sequence, strcmp() will read beyond the end of the allocated storage, possibly causing the process to crash. uuserver similarly uses krb5_read_message() to read a client principal name, and then passes it to printf() and krb5_parse_name() without verifying that it is a valid C string. The krb5_recvauth function is used by kpropd and the Kerberized versions of the BSD rlogin and rsh daemons. 
These daemons are usually run out of inetd or in a mode which forks before processing incoming connections, so a process crash will generally not result in a complete denial of service. Thanks to Tim Uglow for discovering this issue. CVSSv2: AV:N/AC:L/Au:N/C:N/I:N/A:P/E:POC/RL:OF/RC:C [[email protected]: CVSS score] ticket: 8050 (new) target_version: 1.13.1 tags: pullup CWE ID:
int main(argc, argv) int argc; char *argv[]; { krb5_data pname_data, tkt_data; int sock = 0; socklen_t l; int retval; struct sockaddr_in l_inaddr, f_inaddr; /* local, foreign address */ krb5_creds creds, *new_creds; krb5_ccache cc; krb5_data msgtext, msg; krb5_context context; krb5_auth_context auth_context = NULL; #ifndef DEBUG freopen("/tmp/uu-server.log", "w", stderr); #endif retval = krb5_init_context(&context); if (retval) { com_err(argv[0], retval, "while initializing krb5"); exit(1); } #ifdef DEBUG { int one = 1; int acc; struct servent *sp; socklen_t namelen = sizeof(f_inaddr); if ((sock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { com_err("uu-server", errno, "creating socket"); exit(3); } l_inaddr.sin_family = AF_INET; l_inaddr.sin_addr.s_addr = 0; if (argc == 2) { l_inaddr.sin_port = htons(atoi(argv[1])); } else { if (!(sp = getservbyname("uu-sample", "tcp"))) { com_err("uu-server", 0, "can't find uu-sample/tcp service"); exit(3); } l_inaddr.sin_port = sp->s_port; } (void) setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&one, sizeof (one)); if (bind(sock, (struct sockaddr *)&l_inaddr, sizeof(l_inaddr))) { com_err("uu-server", errno, "binding socket"); exit(3); } if (listen(sock, 1) == -1) { com_err("uu-server", errno, "listening"); exit(3); } printf("Server started\n"); fflush(stdout); if ((acc = accept(sock, (struct sockaddr *)&f_inaddr, &namelen)) == -1) { com_err("uu-server", errno, "accepting"); exit(3); } dup2(acc, 0); close(sock); sock = 0; } #endif /* principal name must be sent null-terminated. */ retval = krb5_read_message(context, (krb5_pointer) &sock, &pname_data); if (retval || pname_data.length == 0 || pname_data.data[pname_data.length - 1] != '\0') { com_err ("uu-server", retval, "reading pname"); return 2; } retval = krb5_read_message(context, (krb5_pointer) &sock, &tkt_data); if (retval) { com_err ("uu-server", retval, "reading ticket data"); return 2; } retval = krb5_cc_default(context, &cc); if (retval) { com_err("uu-server", retval, "getting credentials cache"); return 4; } memset (&creds, 0, sizeof(creds)); retval = krb5_cc_get_principal(context, cc, &creds.client); if (retval) { com_err("uu-client", retval, "getting principal name"); return 6; } /* client sends it already null-terminated. */ printf ("uu-server: client principal is \"%s\".\n", pname_data.data); retval = krb5_parse_name(context, pname_data.data, &creds.server); if (retval) { com_err("uu-server", retval, "parsing client name"); return 3; } creds.second_ticket = tkt_data; printf ("uu-server: client ticket is %d bytes.\n", creds.second_ticket.length); retval = krb5_get_credentials(context, KRB5_GC_USER_USER, cc, &creds, &new_creds); if (retval) { com_err("uu-server", retval, "getting user-user ticket"); return 5; } #ifndef DEBUG l = sizeof(f_inaddr); if (getpeername(0, (struct sockaddr *)&f_inaddr, &l) == -1) { com_err("uu-server", errno, "getting client address"); return 6; } #endif l = sizeof(l_inaddr); if (getsockname(0, (struct sockaddr *)&l_inaddr, &l) == -1) { com_err("uu-server", errno, "getting local address"); return 6; } /* send a ticket/authenticator to the other side, so it can get the key we're using for the krb_safe below. 
*/ retval = krb5_auth_con_init(context, &auth_context); if (retval) { com_err("uu-server", retval, "making auth_context"); return 8; } retval = krb5_auth_con_setflags(context, auth_context, KRB5_AUTH_CONTEXT_DO_SEQUENCE); if (retval) { com_err("uu-server", retval, "initializing the auth_context flags"); return 8; } retval = krb5_auth_con_genaddrs(context, auth_context, sock, KRB5_AUTH_CONTEXT_GENERATE_LOCAL_FULL_ADDR | KRB5_AUTH_CONTEXT_GENERATE_REMOTE_FULL_ADDR); if (retval) { com_err("uu-server", retval, "generating addrs for auth_context"); return 9; } #if 1 retval = krb5_mk_req_extended(context, &auth_context, AP_OPTS_USE_SESSION_KEY, NULL, new_creds, &msg); if (retval) { com_err("uu-server", retval, "making AP_REQ"); return 8; } retval = krb5_write_message(context, (krb5_pointer) &sock, &msg); #else retval = krb5_sendauth(context, &auth_context, (krb5_pointer)&sock, "???", 0, 0, AP_OPTS_MUTUAL_REQUIRED | AP_OPTS_USE_SESSION_KEY, NULL, &creds, cc, NULL, NULL, NULL); #endif if (retval) goto cl_short_wrt; free(msg.data); msgtext.length = 32; msgtext.data = "Hello, other end of connection."; retval = krb5_mk_safe(context, auth_context, &msgtext, &msg, NULL); if (retval) { com_err("uu-server", retval, "encoding message to client"); return 6; } retval = krb5_write_message(context, (krb5_pointer) &sock, &msg); if (retval) { cl_short_wrt: com_err("uu-server", retval, "writing message to client"); return 7; } krb5_free_data_contents(context, &msg); krb5_free_data_contents(context, &pname_data); /* tkt_data freed with creds */ krb5_free_cred_contents(context, &creds); krb5_free_creds(context, new_creds); krb5_cc_close(context, cc); krb5_auth_con_free(context, auth_context); krb5_free_context(context); return 0; }
166,811
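Note (sketch, not part of the dataset row above): the commit message also describes the recvauth_common() side of CVE-2014-5355, which this row does not show — build an expected krb5_data value and compare it with data_eq() instead of passing the client-supplied buffer to strcmp(). The fragment below illustrates that pattern; the names "version" and "inbuf" and the returned error code are assumptions for illustration, not the verbatim upstream change.

    krb5_data expected;

    /* Compare length + bytes rather than treating inbuf as a C string, so an
     * empty or non-NUL-terminated client message cannot cause a NULL
     * dereference or a read past the end of the allocation. */
    expected.data = (char *)version;
    expected.length = strlen(version) + 1;
    if (!data_eq(expected, inbuf))
        return KRB5_SENDAUTH_BADAUTHVERS;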
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void BluetoothDeviceChromeOS::OnRegisterAgentError( const ConnectErrorCallback& error_callback, const std::string& error_name, const std::string& error_message) { if (--num_connecting_calls_ == 0) adapter_->NotifyDeviceChanged(this); DCHECK(num_connecting_calls_ >= 0); LOG(WARNING) << object_path_.value() << ": Failed to register agent: " << error_name << ": " << error_message; VLOG(1) << object_path_.value() << ": " << num_connecting_calls_ << " still in progress"; UnregisterAgent(); ConnectErrorCode error_code = ERROR_UNKNOWN; if (error_name == bluetooth_agent_manager::kErrorAlreadyExists) error_code = ERROR_INPROGRESS; RecordPairingResult(error_code); error_callback.Run(error_code); } Commit Message: Refactor to support default Bluetooth pairing delegate In order to support a default pairing delegate we need to move the agent service provider delegate implementation from BluetoothDevice to BluetoothAdapter while retaining the existing API. BUG=338492 TEST=device_unittests, unit_tests, browser_tests Review URL: https://codereview.chromium.org/148293003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@252216 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
void BluetoothDeviceChromeOS::OnRegisterAgentError(
171,230
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) { in_signal_handler = 1; if (BeingDebugged()) BreakDebugger(); PrintToStderr("Received signal "); char buf[1024] = { 0 }; internal::itoa_r(signal, buf, sizeof(buf), 10, 0); PrintToStderr(buf); if (signal == SIGBUS) { if (info->si_code == BUS_ADRALN) PrintToStderr(" BUS_ADRALN "); else if (info->si_code == BUS_ADRERR) PrintToStderr(" BUS_ADRERR "); else if (info->si_code == BUS_OBJERR) PrintToStderr(" BUS_OBJERR "); else PrintToStderr(" <unknown> "); } else if (signal == SIGFPE) { if (info->si_code == FPE_FLTDIV) PrintToStderr(" FPE_FLTDIV "); else if (info->si_code == FPE_FLTINV) PrintToStderr(" FPE_FLTINV "); else if (info->si_code == FPE_FLTOVF) PrintToStderr(" FPE_FLTOVF "); else if (info->si_code == FPE_FLTRES) PrintToStderr(" FPE_FLTRES "); else if (info->si_code == FPE_FLTSUB) PrintToStderr(" FPE_FLTSUB "); else if (info->si_code == FPE_FLTUND) PrintToStderr(" FPE_FLTUND "); else if (info->si_code == FPE_INTDIV) PrintToStderr(" FPE_INTDIV "); else if (info->si_code == FPE_INTOVF) PrintToStderr(" FPE_INTOVF "); else PrintToStderr(" <unknown> "); } else if (signal == SIGILL) { if (info->si_code == ILL_BADSTK) PrintToStderr(" ILL_BADSTK "); else if (info->si_code == ILL_COPROC) PrintToStderr(" ILL_COPROC "); else if (info->si_code == ILL_ILLOPN) PrintToStderr(" ILL_ILLOPN "); else if (info->si_code == ILL_ILLADR) PrintToStderr(" ILL_ILLADR "); else if (info->si_code == ILL_ILLTRP) PrintToStderr(" ILL_ILLTRP "); else if (info->si_code == ILL_PRVOPC) PrintToStderr(" ILL_PRVOPC "); else if (info->si_code == ILL_PRVREG) PrintToStderr(" ILL_PRVREG "); else PrintToStderr(" <unknown> "); } else if (signal == SIGSEGV) { if (info->si_code == SEGV_MAPERR) PrintToStderr(" SEGV_MAPERR "); else if (info->si_code == SEGV_ACCERR) PrintToStderr(" SEGV_ACCERR "); else PrintToStderr(" <unknown> "); } if (signal == SIGBUS || signal == SIGFPE || signal == SIGILL || signal == SIGSEGV) { internal::itoa_r(reinterpret_cast<intptr_t>(info->si_addr), buf, sizeof(buf), 16, 12); PrintToStderr(buf); } PrintToStderr("\n"); debug::StackTrace().Print(); #if defined(OS_LINUX) #if ARCH_CPU_X86_FAMILY ucontext_t* context = reinterpret_cast<ucontext_t*>(void_context); const struct { const char* label; greg_t value; } registers[] = { #if ARCH_CPU_32_BITS { " gs: ", context->uc_mcontext.gregs[REG_GS] }, { " fs: ", context->uc_mcontext.gregs[REG_FS] }, { " es: ", context->uc_mcontext.gregs[REG_ES] }, { " ds: ", context->uc_mcontext.gregs[REG_DS] }, { " edi: ", context->uc_mcontext.gregs[REG_EDI] }, { " esi: ", context->uc_mcontext.gregs[REG_ESI] }, { " ebp: ", context->uc_mcontext.gregs[REG_EBP] }, { " esp: ", context->uc_mcontext.gregs[REG_ESP] }, { " ebx: ", context->uc_mcontext.gregs[REG_EBX] }, { " edx: ", context->uc_mcontext.gregs[REG_EDX] }, { " ecx: ", context->uc_mcontext.gregs[REG_ECX] }, { " eax: ", context->uc_mcontext.gregs[REG_EAX] }, { " trp: ", context->uc_mcontext.gregs[REG_TRAPNO] }, { " err: ", context->uc_mcontext.gregs[REG_ERR] }, { " ip: ", context->uc_mcontext.gregs[REG_EIP] }, { " cs: ", context->uc_mcontext.gregs[REG_CS] }, { " efl: ", context->uc_mcontext.gregs[REG_EFL] }, { " usp: ", context->uc_mcontext.gregs[REG_UESP] }, { " ss: ", context->uc_mcontext.gregs[REG_SS] }, #elif ARCH_CPU_64_BITS { " r8: ", context->uc_mcontext.gregs[REG_R8] }, { " r9: ", context->uc_mcontext.gregs[REG_R9] }, { " r10: ", context->uc_mcontext.gregs[REG_R10] }, { " r11: ", context->uc_mcontext.gregs[REG_R11] }, { " r12: ", 
context->uc_mcontext.gregs[REG_R12] }, { " r13: ", context->uc_mcontext.gregs[REG_R13] }, { " r14: ", context->uc_mcontext.gregs[REG_R14] }, { " r15: ", context->uc_mcontext.gregs[REG_R15] }, { " di: ", context->uc_mcontext.gregs[REG_RDI] }, { " si: ", context->uc_mcontext.gregs[REG_RSI] }, { " bp: ", context->uc_mcontext.gregs[REG_RBP] }, { " bx: ", context->uc_mcontext.gregs[REG_RBX] }, { " dx: ", context->uc_mcontext.gregs[REG_RDX] }, { " ax: ", context->uc_mcontext.gregs[REG_RAX] }, { " cx: ", context->uc_mcontext.gregs[REG_RCX] }, { " sp: ", context->uc_mcontext.gregs[REG_RSP] }, { " ip: ", context->uc_mcontext.gregs[REG_RIP] }, { " efl: ", context->uc_mcontext.gregs[REG_EFL] }, { " cgf: ", context->uc_mcontext.gregs[REG_CSGSFS] }, { " erf: ", context->uc_mcontext.gregs[REG_ERR] }, { " trp: ", context->uc_mcontext.gregs[REG_TRAPNO] }, { " msk: ", context->uc_mcontext.gregs[REG_OLDMASK] }, { " cr2: ", context->uc_mcontext.gregs[REG_CR2] }, #endif }; #if ARCH_CPU_32_BITS const int kRegisterPadding = 8; #elif ARCH_CPU_64_BITS const int kRegisterPadding = 16; #endif for (size_t i = 0; i < ARRAYSIZE_UNSAFE(registers); i++) { PrintToStderr(registers[i].label); internal::itoa_r(registers[i].value, buf, sizeof(buf), 16, kRegisterPadding); PrintToStderr(buf); if ((i + 1) % 4 == 0) PrintToStderr("\n"); } PrintToStderr("\n"); #endif #elif defined(OS_MACOSX) #if ARCH_CPU_X86_FAMILY && ARCH_CPU_32_BITS ucontext_t* context = reinterpret_cast<ucontext_t*>(void_context); size_t len; len = static_cast<size_t>( snprintf(buf, sizeof(buf), "ax: %x, bx: %x, cx: %x, dx: %x\n", context->uc_mcontext->__ss.__eax, context->uc_mcontext->__ss.__ebx, context->uc_mcontext->__ss.__ecx, context->uc_mcontext->__ss.__edx)); write(STDERR_FILENO, buf, std::min(len, sizeof(buf) - 1)); len = static_cast<size_t>( snprintf(buf, sizeof(buf), "di: %x, si: %x, bp: %x, sp: %x, ss: %x, flags: %x\n", context->uc_mcontext->__ss.__edi, context->uc_mcontext->__ss.__esi, context->uc_mcontext->__ss.__ebp, context->uc_mcontext->__ss.__esp, context->uc_mcontext->__ss.__ss, context->uc_mcontext->__ss.__eflags)); write(STDERR_FILENO, buf, std::min(len, sizeof(buf) - 1)); len = static_cast<size_t>( snprintf(buf, sizeof(buf), "ip: %x, cs: %x, ds: %x, es: %x, fs: %x, gs: %x\n", context->uc_mcontext->__ss.__eip, context->uc_mcontext->__ss.__cs, context->uc_mcontext->__ss.__ds, context->uc_mcontext->__ss.__es, context->uc_mcontext->__ss.__fs, context->uc_mcontext->__ss.__gs)); write(STDERR_FILENO, buf, std::min(len, sizeof(buf) - 1)); #endif // ARCH_CPU_32_BITS #endif // defined(OS_MACOSX) _exit(1); } Commit Message: Convert ARRAYSIZE_UNSAFE -> arraysize in base/. [email protected] BUG=423134 Review URL: https://codereview.chromium.org/656033009 Cr-Commit-Position: refs/heads/master@{#299835} CWE ID: CWE-189
void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) { in_signal_handler = 1; if (BeingDebugged()) BreakDebugger(); PrintToStderr("Received signal "); char buf[1024] = { 0 }; internal::itoa_r(signal, buf, sizeof(buf), 10, 0); PrintToStderr(buf); if (signal == SIGBUS) { if (info->si_code == BUS_ADRALN) PrintToStderr(" BUS_ADRALN "); else if (info->si_code == BUS_ADRERR) PrintToStderr(" BUS_ADRERR "); else if (info->si_code == BUS_OBJERR) PrintToStderr(" BUS_OBJERR "); else PrintToStderr(" <unknown> "); } else if (signal == SIGFPE) { if (info->si_code == FPE_FLTDIV) PrintToStderr(" FPE_FLTDIV "); else if (info->si_code == FPE_FLTINV) PrintToStderr(" FPE_FLTINV "); else if (info->si_code == FPE_FLTOVF) PrintToStderr(" FPE_FLTOVF "); else if (info->si_code == FPE_FLTRES) PrintToStderr(" FPE_FLTRES "); else if (info->si_code == FPE_FLTSUB) PrintToStderr(" FPE_FLTSUB "); else if (info->si_code == FPE_FLTUND) PrintToStderr(" FPE_FLTUND "); else if (info->si_code == FPE_INTDIV) PrintToStderr(" FPE_INTDIV "); else if (info->si_code == FPE_INTOVF) PrintToStderr(" FPE_INTOVF "); else PrintToStderr(" <unknown> "); } else if (signal == SIGILL) { if (info->si_code == ILL_BADSTK) PrintToStderr(" ILL_BADSTK "); else if (info->si_code == ILL_COPROC) PrintToStderr(" ILL_COPROC "); else if (info->si_code == ILL_ILLOPN) PrintToStderr(" ILL_ILLOPN "); else if (info->si_code == ILL_ILLADR) PrintToStderr(" ILL_ILLADR "); else if (info->si_code == ILL_ILLTRP) PrintToStderr(" ILL_ILLTRP "); else if (info->si_code == ILL_PRVOPC) PrintToStderr(" ILL_PRVOPC "); else if (info->si_code == ILL_PRVREG) PrintToStderr(" ILL_PRVREG "); else PrintToStderr(" <unknown> "); } else if (signal == SIGSEGV) { if (info->si_code == SEGV_MAPERR) PrintToStderr(" SEGV_MAPERR "); else if (info->si_code == SEGV_ACCERR) PrintToStderr(" SEGV_ACCERR "); else PrintToStderr(" <unknown> "); } if (signal == SIGBUS || signal == SIGFPE || signal == SIGILL || signal == SIGSEGV) { internal::itoa_r(reinterpret_cast<intptr_t>(info->si_addr), buf, sizeof(buf), 16, 12); PrintToStderr(buf); } PrintToStderr("\n"); debug::StackTrace().Print(); #if defined(OS_LINUX) #if ARCH_CPU_X86_FAMILY ucontext_t* context = reinterpret_cast<ucontext_t*>(void_context); const struct { const char* label; greg_t value; } registers[] = { #if ARCH_CPU_32_BITS { " gs: ", context->uc_mcontext.gregs[REG_GS] }, { " fs: ", context->uc_mcontext.gregs[REG_FS] }, { " es: ", context->uc_mcontext.gregs[REG_ES] }, { " ds: ", context->uc_mcontext.gregs[REG_DS] }, { " edi: ", context->uc_mcontext.gregs[REG_EDI] }, { " esi: ", context->uc_mcontext.gregs[REG_ESI] }, { " ebp: ", context->uc_mcontext.gregs[REG_EBP] }, { " esp: ", context->uc_mcontext.gregs[REG_ESP] }, { " ebx: ", context->uc_mcontext.gregs[REG_EBX] }, { " edx: ", context->uc_mcontext.gregs[REG_EDX] }, { " ecx: ", context->uc_mcontext.gregs[REG_ECX] }, { " eax: ", context->uc_mcontext.gregs[REG_EAX] }, { " trp: ", context->uc_mcontext.gregs[REG_TRAPNO] }, { " err: ", context->uc_mcontext.gregs[REG_ERR] }, { " ip: ", context->uc_mcontext.gregs[REG_EIP] }, { " cs: ", context->uc_mcontext.gregs[REG_CS] }, { " efl: ", context->uc_mcontext.gregs[REG_EFL] }, { " usp: ", context->uc_mcontext.gregs[REG_UESP] }, { " ss: ", context->uc_mcontext.gregs[REG_SS] }, #elif ARCH_CPU_64_BITS { " r8: ", context->uc_mcontext.gregs[REG_R8] }, { " r9: ", context->uc_mcontext.gregs[REG_R9] }, { " r10: ", context->uc_mcontext.gregs[REG_R10] }, { " r11: ", context->uc_mcontext.gregs[REG_R11] }, { " r12: ", 
context->uc_mcontext.gregs[REG_R12] }, { " r13: ", context->uc_mcontext.gregs[REG_R13] }, { " r14: ", context->uc_mcontext.gregs[REG_R14] }, { " r15: ", context->uc_mcontext.gregs[REG_R15] }, { " di: ", context->uc_mcontext.gregs[REG_RDI] }, { " si: ", context->uc_mcontext.gregs[REG_RSI] }, { " bp: ", context->uc_mcontext.gregs[REG_RBP] }, { " bx: ", context->uc_mcontext.gregs[REG_RBX] }, { " dx: ", context->uc_mcontext.gregs[REG_RDX] }, { " ax: ", context->uc_mcontext.gregs[REG_RAX] }, { " cx: ", context->uc_mcontext.gregs[REG_RCX] }, { " sp: ", context->uc_mcontext.gregs[REG_RSP] }, { " ip: ", context->uc_mcontext.gregs[REG_RIP] }, { " efl: ", context->uc_mcontext.gregs[REG_EFL] }, { " cgf: ", context->uc_mcontext.gregs[REG_CSGSFS] }, { " erf: ", context->uc_mcontext.gregs[REG_ERR] }, { " trp: ", context->uc_mcontext.gregs[REG_TRAPNO] }, { " msk: ", context->uc_mcontext.gregs[REG_OLDMASK] }, { " cr2: ", context->uc_mcontext.gregs[REG_CR2] }, #endif }; #if ARCH_CPU_32_BITS const int kRegisterPadding = 8; #elif ARCH_CPU_64_BITS const int kRegisterPadding = 16; #endif for (size_t i = 0; i < arraysize(registers); i++) { PrintToStderr(registers[i].label); internal::itoa_r(registers[i].value, buf, sizeof(buf), 16, kRegisterPadding); PrintToStderr(buf); if ((i + 1) % 4 == 0) PrintToStderr("\n"); } PrintToStderr("\n"); #endif #elif defined(OS_MACOSX) #if ARCH_CPU_X86_FAMILY && ARCH_CPU_32_BITS ucontext_t* context = reinterpret_cast<ucontext_t*>(void_context); size_t len; len = static_cast<size_t>( snprintf(buf, sizeof(buf), "ax: %x, bx: %x, cx: %x, dx: %x\n", context->uc_mcontext->__ss.__eax, context->uc_mcontext->__ss.__ebx, context->uc_mcontext->__ss.__ecx, context->uc_mcontext->__ss.__edx)); write(STDERR_FILENO, buf, std::min(len, sizeof(buf) - 1)); len = static_cast<size_t>( snprintf(buf, sizeof(buf), "di: %x, si: %x, bp: %x, sp: %x, ss: %x, flags: %x\n", context->uc_mcontext->__ss.__edi, context->uc_mcontext->__ss.__esi, context->uc_mcontext->__ss.__ebp, context->uc_mcontext->__ss.__esp, context->uc_mcontext->__ss.__ss, context->uc_mcontext->__ss.__eflags)); write(STDERR_FILENO, buf, std::min(len, sizeof(buf) - 1)); len = static_cast<size_t>( snprintf(buf, sizeof(buf), "ip: %x, cs: %x, ds: %x, es: %x, fs: %x, gs: %x\n", context->uc_mcontext->__ss.__eip, context->uc_mcontext->__ss.__cs, context->uc_mcontext->__ss.__ds, context->uc_mcontext->__ss.__es, context->uc_mcontext->__ss.__fs, context->uc_mcontext->__ss.__gs)); write(STDERR_FILENO, buf, std::min(len, sizeof(buf) - 1)); #endif // ARCH_CPU_32_BITS #endif // defined(OS_MACOSX) _exit(1); }
171,162