instruction: stringclasses (1 unique value)
input: stringlengths (90 – 9.34k)
output: stringlengths (16 – 15.4k)
__index_level_0__: int64 (165k – 175k)
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int php_handler(request_rec *r) { php_struct * volatile ctx; void *conf; apr_bucket_brigade * volatile brigade; apr_bucket *bucket; apr_status_t rv; request_rec * volatile parent_req = NULL; TSRMLS_FETCH(); #define PHPAP_INI_OFF php_apache_ini_dtor(r, parent_req TSRMLS_CC); conf = ap_get_module_config(r->per_dir_config, &php5_module); /* apply_config() needs r in some cases, so allocate server_context early */ ctx = SG(server_context); if (ctx == NULL || (ctx && ctx->request_processed && !strcmp(r->protocol, "INCLUDED"))) { normal: ctx = SG(server_context) = apr_pcalloc(r->pool, sizeof(*ctx)); /* register a cleanup so we clear out the SG(server_context) * after each request. Note: We pass in the pointer to the * server_context in case this is handled by a different thread. */ apr_pool_cleanup_register(r->pool, (void *)&SG(server_context), php_server_context_cleanup, apr_pool_cleanup_null); ctx->r = r; ctx = NULL; /* May look weird to null it here, but it is to catch the right case in the first_try later on */ } else { parent_req = ctx->r; ctx->r = r; } apply_config(conf); if (strcmp(r->handler, PHP_MAGIC_TYPE) && strcmp(r->handler, PHP_SOURCE_MAGIC_TYPE) && strcmp(r->handler, PHP_SCRIPT)) { /* Check for xbithack in this case. */ if (!AP2(xbithack) || strcmp(r->handler, "text/html") || !(r->finfo.protection & APR_UEXECUTE)) { PHPAP_INI_OFF; return DECLINED; } } /* Give a 404 if PATH_INFO is used but is explicitly disabled in * the configuration; default behaviour is to accept. */ if (r->used_path_info == AP_REQ_REJECT_PATH_INFO && r->path_info && r->path_info[0]) { PHPAP_INI_OFF; return HTTP_NOT_FOUND; } /* handle situations where user turns the engine off */ if (!AP2(engine)) { PHPAP_INI_OFF; return DECLINED; } if (r->finfo.filetype == 0) { php_apache_sapi_log_message_ex("script '%s' not found or unable to stat", r TSRMLS_CC); PHPAP_INI_OFF; return HTTP_NOT_FOUND; } if (r->finfo.filetype == APR_DIR) { php_apache_sapi_log_message_ex("attempt to invoke directory '%s' as script", r TSRMLS_CC); PHPAP_INI_OFF; return HTTP_FORBIDDEN; } /* Setup the CGI variables if this is the main request */ if (r->main == NULL || /* .. or if the sub-request environment differs from the main-request. */ r->subprocess_env != r->main->subprocess_env ) { /* setup standard CGI variables */ ap_add_common_vars(r); ap_add_cgi_vars(r); } zend_first_try { if (ctx == NULL) { brigade = apr_brigade_create(r->pool, r->connection->bucket_alloc); ctx = SG(server_context); ctx->brigade = brigade; if (php_apache_request_ctor(r, ctx TSRMLS_CC)!=SUCCESS) { zend_bailout(); } } else { if (!parent_req) { parent_req = ctx->r; } if (parent_req && parent_req->handler && strcmp(parent_req->handler, PHP_MAGIC_TYPE) && strcmp(parent_req->handler, PHP_SOURCE_MAGIC_TYPE) && strcmp(parent_req->handler, PHP_SCRIPT)) { if (php_apache_request_ctor(r, ctx TSRMLS_CC)!=SUCCESS) { zend_bailout(); } } /* * check if coming due to ErrorDocument * We make a special exception of 413 (Invalid POST request) as the invalidity of the request occurs * during processing of the request by PHP during POST processing. Therefor we need to re-use the exiting * PHP instance to handle the request rather then creating a new one. 
*/ if (parent_req && parent_req->status != HTTP_OK && parent_req->status != 413 && strcmp(r->protocol, "INCLUDED")) { parent_req = NULL; goto normal; } ctx->r = r; brigade = ctx->brigade; } if (AP2(last_modified)) { ap_update_mtime(r, r->finfo.mtime); ap_set_last_modified(r); } /* Determine if we need to parse the file or show the source */ if (strncmp(r->handler, PHP_SOURCE_MAGIC_TYPE, sizeof(PHP_SOURCE_MAGIC_TYPE) - 1) == 0) { zend_syntax_highlighter_ini syntax_highlighter_ini; php_get_highlight_struct(&syntax_highlighter_ini); highlight_file((char *)r->filename, &syntax_highlighter_ini TSRMLS_CC); } else { zend_file_handle zfd; zfd.type = ZEND_HANDLE_FILENAME; zfd.filename = (char *) r->filename; zfd.free_filename = 0; zfd.opened_path = NULL; if (!parent_req) { php_execute_script(&zfd TSRMLS_CC); } else { zend_execute_scripts(ZEND_INCLUDE TSRMLS_CC, NULL, 1, &zfd); } apr_table_set(r->notes, "mod_php_memory_usage", apr_psprintf(ctx->r->pool, "%" APR_SIZE_T_FMT, zend_memory_peak_usage(1 TSRMLS_CC))); } } zend_end_try(); if (!parent_req) { php_apache_request_dtor(r TSRMLS_CC); ctx->request_processed = 1; bucket = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(brigade, bucket); rv = ap_pass_brigade(r->output_filters, brigade); if (rv != APR_SUCCESS || r->connection->aborted) { zend_first_try { php_handle_aborted_connection(); } zend_end_try(); } apr_brigade_cleanup(brigade); } else { ctx->r = parent_req; } return OK; } Commit Message: CWE ID: CWE-20
static int php_handler(request_rec *r) { php_struct * volatile ctx; void *conf; apr_bucket_brigade * volatile brigade; apr_bucket *bucket; apr_status_t rv; request_rec * volatile parent_req = NULL; TSRMLS_FETCH(); #define PHPAP_INI_OFF php_apache_ini_dtor(r, parent_req TSRMLS_CC); conf = ap_get_module_config(r->per_dir_config, &php5_module); /* apply_config() needs r in some cases, so allocate server_context early */ ctx = SG(server_context); if (ctx == NULL || (ctx && ctx->request_processed && !strcmp(r->protocol, "INCLUDED"))) { normal: ctx = SG(server_context) = apr_pcalloc(r->pool, sizeof(*ctx)); /* register a cleanup so we clear out the SG(server_context) * after each request. Note: We pass in the pointer to the * server_context in case this is handled by a different thread. */ apr_pool_cleanup_register(r->pool, (void *)&SG(server_context), php_server_context_cleanup, apr_pool_cleanup_null); ctx->r = r; ctx = NULL; /* May look weird to null it here, but it is to catch the right case in the first_try later on */ } else { parent_req = ctx->r; ctx->r = r; } apply_config(conf); if (strcmp(r->handler, PHP_MAGIC_TYPE) && strcmp(r->handler, PHP_SOURCE_MAGIC_TYPE) && strcmp(r->handler, PHP_SCRIPT)) { /* Check for xbithack in this case. */ if (!AP2(xbithack) || strcmp(r->handler, "text/html") || !(r->finfo.protection & APR_UEXECUTE)) { PHPAP_INI_OFF; return DECLINED; } } /* Give a 404 if PATH_INFO is used but is explicitly disabled in * the configuration; default behaviour is to accept. */ if (r->used_path_info == AP_REQ_REJECT_PATH_INFO && r->path_info && r->path_info[0]) { PHPAP_INI_OFF; return HTTP_NOT_FOUND; } /* handle situations where user turns the engine off */ if (!AP2(engine)) { PHPAP_INI_OFF; return DECLINED; } if (r->finfo.filetype == 0) { php_apache_sapi_log_message_ex("script '%s' not found or unable to stat", r TSRMLS_CC); PHPAP_INI_OFF; return HTTP_NOT_FOUND; } if (r->finfo.filetype == APR_DIR) { php_apache_sapi_log_message_ex("attempt to invoke directory '%s' as script", r TSRMLS_CC); PHPAP_INI_OFF; return HTTP_FORBIDDEN; } /* Setup the CGI variables if this is the main request */ if (r->main == NULL || /* .. or if the sub-request environment differs from the main-request. */ r->subprocess_env != r->main->subprocess_env ) { /* setup standard CGI variables */ ap_add_common_vars(r); ap_add_cgi_vars(r); } zend_first_try { if (ctx == NULL) { brigade = apr_brigade_create(r->pool, r->connection->bucket_alloc); ctx = SG(server_context); ctx->brigade = brigade; if (php_apache_request_ctor(r, ctx TSRMLS_CC)!=SUCCESS) { zend_bailout(); } } else { if (!parent_req) { parent_req = ctx->r; } if (parent_req && parent_req->handler && strcmp(parent_req->handler, PHP_MAGIC_TYPE) && strcmp(parent_req->handler, PHP_SOURCE_MAGIC_TYPE) && strcmp(parent_req->handler, PHP_SCRIPT)) { if (php_apache_request_ctor(r, ctx TSRMLS_CC)!=SUCCESS) { zend_bailout(); } } /* * check if coming due to ErrorDocument * We make a special exception of 413 (Invalid POST request) as the invalidity of the request occurs * during processing of the request by PHP during POST processing. Therefor we need to re-use the exiting * PHP instance to handle the request rather then creating a new one. 
*/ if (parent_req && parent_req->status != HTTP_OK && parent_req->status != 413 && strcmp(r->protocol, "INCLUDED")) { parent_req = NULL; goto normal; } ctx->r = r; brigade = ctx->brigade; } if (AP2(last_modified)) { ap_update_mtime(r, r->finfo.mtime); ap_set_last_modified(r); } /* Determine if we need to parse the file or show the source */ if (strncmp(r->handler, PHP_SOURCE_MAGIC_TYPE, sizeof(PHP_SOURCE_MAGIC_TYPE) - 1) == 0) { zend_syntax_highlighter_ini syntax_highlighter_ini; php_get_highlight_struct(&syntax_highlighter_ini); highlight_file((char *)r->filename, &syntax_highlighter_ini TSRMLS_CC); } else { zend_file_handle zfd; zfd.type = ZEND_HANDLE_FILENAME; zfd.filename = (char *) r->filename; zfd.free_filename = 0; zfd.opened_path = NULL; if (!parent_req) { php_execute_script(&zfd TSRMLS_CC); } else { zend_execute_scripts(ZEND_INCLUDE TSRMLS_CC, NULL, 1, &zfd); } apr_table_set(r->notes, "mod_php_memory_usage", apr_psprintf(ctx->r->pool, "%" APR_SIZE_T_FMT, zend_memory_peak_usage(1 TSRMLS_CC))); } } zend_end_try(); if (!parent_req) { php_apache_request_dtor(r TSRMLS_CC); ctx->request_processed = 1; bucket = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(brigade, bucket); rv = ap_pass_brigade(r->output_filters, brigade); if (rv != APR_SUCCESS || r->connection->aborted) { zend_first_try { php_handle_aborted_connection(); } zend_end_try(); } apr_brigade_cleanup(brigade); apr_pool_cleanup_run(r->pool, (void *)&SG(server_context), php_server_context_cleanup); } else { ctx->r = parent_req; } return OK; }
164,710
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: daemon_msg_open_req(uint8 ver, struct daemon_slpars *pars, uint32 plen, char *source, size_t sourcelen) { char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors char errmsgbuf[PCAP_ERRBUF_SIZE]; // buffer for errors to send to the client pcap_t *fp; // pcap_t main variable int nread; char sendbuf[RPCAP_NETBUF_SIZE]; // temporary buffer in which data to be sent is buffered int sendbufidx = 0; // index which keeps the number of bytes currently buffered struct rpcap_openreply *openreply; // open reply message if (plen > sourcelen - 1) { pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Source string too long"); goto error; } nread = sock_recv(pars->sockctrl, source, plen, SOCK_RECEIVEALL_YES|SOCK_EOF_IS_ERROR, errbuf, PCAP_ERRBUF_SIZE); if (nread == -1) { rpcapd_log(LOGPRIO_ERROR, "Read from client failed: %s", errbuf); return -1; } source[nread] = '\0'; plen -= nread; if ((fp = pcap_open_live(source, 1500 /* fake snaplen */, 0 /* no promis */, 1000 /* fake timeout */, errmsgbuf)) == NULL) goto error; if (sock_bufferize(NULL, sizeof(struct rpcap_header), NULL, &sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1) goto error; rpcap_createhdr((struct rpcap_header *) sendbuf, ver, RPCAP_MSG_OPEN_REPLY, 0, sizeof(struct rpcap_openreply)); openreply = (struct rpcap_openreply *) &sendbuf[sendbufidx]; if (sock_bufferize(NULL, sizeof(struct rpcap_openreply), NULL, &sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1) goto error; memset(openreply, 0, sizeof(struct rpcap_openreply)); openreply->linktype = htonl(pcap_datalink(fp)); openreply->tzoff = 0; /* This is always 0 for live captures */ pcap_close(fp); if (sock_send(pars->sockctrl, sendbuf, sendbufidx, errbuf, PCAP_ERRBUF_SIZE) == -1) { rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf); return -1; } return 0; error: if (rpcap_senderror(pars->sockctrl, ver, PCAP_ERR_OPEN, errmsgbuf, errbuf) == -1) { rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf); return -1; } if (rpcapd_discard(pars->sockctrl, plen) == -1) { return -1; } return 0; } Commit Message: In the open request, reject capture sources that are URLs. You shouldn't be able to ask a server to open a remote device on some *other* server; just open it yourself. This addresses Include Security issue F13: [libpcap] Remote Packet Capture Daemon Allows Opening Capture URLs. CWE ID: CWE-918
daemon_msg_open_req(uint8 ver, struct daemon_slpars *pars, uint32 plen, char *source, size_t sourcelen) { char errbuf[PCAP_ERRBUF_SIZE]; // buffer for network errors char errmsgbuf[PCAP_ERRBUF_SIZE]; // buffer for errors to send to the client pcap_t *fp; // pcap_t main variable int nread; char sendbuf[RPCAP_NETBUF_SIZE]; // temporary buffer in which data to be sent is buffered int sendbufidx = 0; // index which keeps the number of bytes currently buffered struct rpcap_openreply *openreply; // open reply message if (plen > sourcelen - 1) { pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Source string too long"); goto error; } nread = sock_recv(pars->sockctrl, source, plen, SOCK_RECEIVEALL_YES|SOCK_EOF_IS_ERROR, errbuf, PCAP_ERRBUF_SIZE); if (nread == -1) { rpcapd_log(LOGPRIO_ERROR, "Read from client failed: %s", errbuf); return -1; } source[nread] = '\0'; plen -= nread; // Is this a URL rather than a device? // If so, reject it. if (is_url(source)) { pcap_snprintf(errmsgbuf, PCAP_ERRBUF_SIZE, "Source string refers to a remote device"); goto error; } if ((fp = pcap_open_live(source, 1500 /* fake snaplen */, 0 /* no promis */, 1000 /* fake timeout */, errmsgbuf)) == NULL) goto error; if (sock_bufferize(NULL, sizeof(struct rpcap_header), NULL, &sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1) goto error; rpcap_createhdr((struct rpcap_header *) sendbuf, ver, RPCAP_MSG_OPEN_REPLY, 0, sizeof(struct rpcap_openreply)); openreply = (struct rpcap_openreply *) &sendbuf[sendbufidx]; if (sock_bufferize(NULL, sizeof(struct rpcap_openreply), NULL, &sendbufidx, RPCAP_NETBUF_SIZE, SOCKBUF_CHECKONLY, errmsgbuf, PCAP_ERRBUF_SIZE) == -1) goto error; memset(openreply, 0, sizeof(struct rpcap_openreply)); openreply->linktype = htonl(pcap_datalink(fp)); openreply->tzoff = 0; /* This is always 0 for live captures */ pcap_close(fp); if (sock_send(pars->sockctrl, sendbuf, sendbufidx, errbuf, PCAP_ERRBUF_SIZE) == -1) { rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf); return -1; } return 0; error: if (rpcap_senderror(pars->sockctrl, ver, PCAP_ERR_OPEN, errmsgbuf, errbuf) == -1) { rpcapd_log(LOGPRIO_ERROR, "Send to client failed: %s", errbuf); return -1; } if (rpcapd_discard(pars->sockctrl, plen) == -1) { return -1; } return 0; }
169,540
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { struct flakey_c *fc = ti->private; return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg); } Commit Message: dm: do not forward ioctls from logical volumes to the underlying device A logical volume can map to just part of underlying physical volume. In this case, it must be treated like a partition. Based on a patch from Alasdair G Kergon. Cc: Alasdair G Kergon <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-264
static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { struct flakey_c *fc = ti->private; struct dm_dev *dev = fc->dev; int r = 0; /* * Only pass ioctls through if the device sizes match exactly. */ if (fc->start || ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) r = scsi_verify_blk_ioctl(NULL, cmd); return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg); }
165,722
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void Encoder::EncodeFrameInternal(const VideoSource &video, const unsigned long frame_flags) { vpx_codec_err_t res; const vpx_image_t *img = video.img(); if (!encoder_.priv) { cfg_.g_w = img->d_w; cfg_.g_h = img->d_h; cfg_.g_timebase = video.timebase(); cfg_.rc_twopass_stats_in = stats_->buf(); res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_, init_flags_); ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError(); } if (cfg_.g_w != img->d_w || cfg_.g_h != img->d_h) { cfg_.g_w = img->d_w; cfg_.g_h = img->d_h; res = vpx_codec_enc_config_set(&encoder_, &cfg_); ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError(); } REGISTER_STATE_CHECK( res = vpx_codec_encode(&encoder_, video.img(), video.pts(), video.duration(), frame_flags, deadline_)); ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError(); } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
void Encoder::EncodeFrameInternal(const VideoSource &video, const unsigned long frame_flags) { vpx_codec_err_t res; const vpx_image_t *img = video.img(); if (cfg_.g_w != img->d_w || cfg_.g_h != img->d_h) { cfg_.g_w = img->d_w; cfg_.g_h = img->d_h; res = vpx_codec_enc_config_set(&encoder_, &cfg_); ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError(); } API_REGISTER_STATE_CHECK( res = vpx_codec_encode(&encoder_, img, video.pts(), video.duration(), frame_flags, deadline_)); ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError(); }
174,536
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: unsigned int SAD(unsigned int max_sad, int block_idx = 0) { unsigned int ret; const uint8_t* const reference = GetReference(block_idx); REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_, reference, reference_stride_, max_sad)); return ret; } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
unsigned int SAD(unsigned int max_sad, int block_idx = 0) {
174,575
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: OMX_ERRORTYPE omx_video::use_output_buffer( OMX_IN OMX_HANDLETYPE hComp, OMX_INOUT OMX_BUFFERHEADERTYPE** bufferHdr, OMX_IN OMX_U32 port, OMX_IN OMX_PTR appData, OMX_IN OMX_U32 bytes, OMX_IN OMX_U8* buffer) { (void)hComp, (void)port; OMX_ERRORTYPE eRet = OMX_ErrorNone; OMX_BUFFERHEADERTYPE *bufHdr= NULL; // buffer header unsigned i= 0; // Temporary counter unsigned char *buf_addr = NULL; #ifdef _MSM8974_ int align_size; #endif DEBUG_PRINT_HIGH("Inside use_output_buffer()"); if (bytes != m_sOutPortDef.nBufferSize) { DEBUG_PRINT_ERROR("ERROR: use_output_buffer: Size Mismatch!! " "bytes[%u] != Port.nBufferSize[%u]", (unsigned int)bytes, (unsigned int)m_sOutPortDef.nBufferSize); return OMX_ErrorBadParameter; } if (!m_out_mem_ptr) { output_use_buffer = true; int nBufHdrSize = 0; DEBUG_PRINT_LOW("Allocating First Output Buffer(%u)",(unsigned int)m_sOutPortDef.nBufferCountActual); nBufHdrSize = m_sOutPortDef.nBufferCountActual * sizeof(OMX_BUFFERHEADERTYPE); /* * Memory for output side involves the following: * 1. Array of Buffer Headers * 2. Bitmask array to hold the buffer allocation details * In order to minimize the memory management entire allocation * is done in one step. */ m_out_mem_ptr = (OMX_BUFFERHEADERTYPE *)calloc(nBufHdrSize,1); if (m_out_mem_ptr == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_out_mem_ptr"); return OMX_ErrorInsufficientResources; } m_pOutput_pmem = (struct pmem *) calloc(sizeof (struct pmem), m_sOutPortDef.nBufferCountActual); if (m_pOutput_pmem == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_pOutput_pmem"); return OMX_ErrorInsufficientResources; } #ifdef USE_ION m_pOutput_ion = (struct venc_ion *) calloc(sizeof (struct venc_ion), m_sOutPortDef.nBufferCountActual); if (m_pOutput_ion == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_pOutput_ion"); return OMX_ErrorInsufficientResources; } #endif if (m_out_mem_ptr) { bufHdr = m_out_mem_ptr; DEBUG_PRINT_LOW("Memory Allocation Succeeded for OUT port%p",m_out_mem_ptr); for (i=0; i < m_sOutPortDef.nBufferCountActual ; i++) { bufHdr->nSize = sizeof(OMX_BUFFERHEADERTYPE); bufHdr->nVersion.nVersion = OMX_SPEC_VERSION; bufHdr->nAllocLen = bytes; bufHdr->nFilledLen = 0; bufHdr->pAppPrivate = appData; bufHdr->nOutputPortIndex = PORT_INDEX_OUT; bufHdr->pBuffer = NULL; bufHdr++; m_pOutput_pmem[i].fd = -1; #ifdef USE_ION m_pOutput_ion[i].ion_device_fd =-1; m_pOutput_ion[i].fd_ion_data.fd=-1; m_pOutput_ion[i].ion_alloc_data.handle = 0; #endif } } else { DEBUG_PRINT_ERROR("ERROR: Output buf mem alloc failed[0x%p]",m_out_mem_ptr); eRet = OMX_ErrorInsufficientResources; } } for (i=0; i< m_sOutPortDef.nBufferCountActual; i++) { if (BITMASK_ABSENT(&m_out_bm_count,i)) { break; } } if (eRet == OMX_ErrorNone) { if (i < m_sOutPortDef.nBufferCountActual) { *bufferHdr = (m_out_mem_ptr + i ); (*bufferHdr)->pBuffer = (OMX_U8 *)buffer; (*bufferHdr)->pAppPrivate = appData; BITMASK_SET(&m_out_bm_count,i); if (!m_use_output_pmem) { #ifdef USE_ION #ifdef _MSM8974_ align_size = (m_sOutPortDef.nBufferSize + (SZ_4K - 1)) & ~(SZ_4K - 1); m_pOutput_ion[i].ion_device_fd = alloc_map_ion_memory(align_size, &m_pOutput_ion[i].ion_alloc_data, &m_pOutput_ion[i].fd_ion_data,0); #else m_pOutput_ion[i].ion_device_fd = alloc_map_ion_memory( m_sOutPortDef.nBufferSize, &m_pOutput_ion[i].ion_alloc_data, &m_pOutput_ion[i].fd_ion_data,ION_FLAG_CACHED); #endif if (m_pOutput_ion[i].ion_device_fd < 0) { DEBUG_PRINT_ERROR("ERROR:ION device open() Failed"); return OMX_ErrorInsufficientResources; } m_pOutput_pmem[i].fd = 
m_pOutput_ion[i].fd_ion_data.fd; #else m_pOutput_pmem[i].fd = open (MEM_DEVICE,O_RDWR); if (m_pOutput_pmem[i].fd == 0) { m_pOutput_pmem[i].fd = open (MEM_DEVICE,O_RDWR); } if (m_pOutput_pmem[i].fd < 0) { DEBUG_PRINT_ERROR("ERROR: /dev/pmem_adsp open() Failed"); return OMX_ErrorInsufficientResources; } #endif m_pOutput_pmem[i].size = m_sOutPortDef.nBufferSize; m_pOutput_pmem[i].offset = 0; m_pOutput_pmem[i].buffer = (OMX_U8 *)SECURE_BUFPTR; if(!secure_session) { #ifdef _MSM8974_ m_pOutput_pmem[i].buffer = (unsigned char *)mmap(NULL, align_size,PROT_READ|PROT_WRITE, MAP_SHARED,m_pOutput_pmem[i].fd,0); #else m_pOutput_pmem[i].buffer = (unsigned char *)mmap(NULL, m_pOutput_pmem[i].size,PROT_READ|PROT_WRITE, MAP_SHARED,m_pOutput_pmem[i].fd,0); #endif if (m_pOutput_pmem[i].buffer == MAP_FAILED) { DEBUG_PRINT_ERROR("ERROR: mmap() Failed"); close(m_pOutput_pmem[i].fd); #ifdef USE_ION free_ion_memory(&m_pOutput_ion[i]); #endif return OMX_ErrorInsufficientResources; } } } else { OMX_QCOM_PLATFORM_PRIVATE_PMEM_INFO *pParam = reinterpret_cast<OMX_QCOM_PLATFORM_PRIVATE_PMEM_INFO*>((*bufferHdr)->pAppPrivate); DEBUG_PRINT_LOW("Inside qcom_ext pParam: %p", pParam); if (pParam) { DEBUG_PRINT_LOW("Inside qcom_ext with luma:(fd:%lu,offset:0x%x)", pParam->pmem_fd, (int)pParam->offset); m_pOutput_pmem[i].fd = pParam->pmem_fd; m_pOutput_pmem[i].offset = pParam->offset; m_pOutput_pmem[i].size = m_sOutPortDef.nBufferSize; m_pOutput_pmem[i].buffer = (unsigned char *)buffer; } else { DEBUG_PRINT_ERROR("ERROR: Invalid AppData given for PMEM o/p UseBuffer case"); return OMX_ErrorBadParameter; } buf_addr = (unsigned char *)buffer; } DEBUG_PRINT_LOW("use_out:: bufhdr = %p, pBuffer = %p, m_pOutput_pmem[i].buffer = %p", (*bufferHdr), (*bufferHdr)->pBuffer, m_pOutput_pmem[i].buffer); if (dev_use_buf(&m_pOutput_pmem[i],PORT_INDEX_OUT,i) != true) { DEBUG_PRINT_ERROR("ERROR: dev_use_buf Failed for o/p buf"); return OMX_ErrorInsufficientResources; } } else { DEBUG_PRINT_ERROR("ERROR: All o/p Buffers have been Used, invalid use_buf call for " "index = %u", i); eRet = OMX_ErrorInsufficientResources; } } return eRet; } Commit Message: DO NOT MERGE mm-video-v4l2: venc: add safety checks for freeing buffers Allow only up to 64 buffers on input/output port (since the allocation bitmap is only 64-wide). Add safety checks to free only as many buffers were allocated. Fixes: Heap Overflow and Possible Local Privilege Escalation in MediaServer (libOmxVenc problem) Bug: 27532497 Change-Id: I31e576ef9dc542df73aa6b0ea113d72724b50fc6 CWE ID: CWE-119
OMX_ERRORTYPE omx_video::use_output_buffer( OMX_IN OMX_HANDLETYPE hComp, OMX_INOUT OMX_BUFFERHEADERTYPE** bufferHdr, OMX_IN OMX_U32 port, OMX_IN OMX_PTR appData, OMX_IN OMX_U32 bytes, OMX_IN OMX_U8* buffer) { (void)hComp, (void)port; OMX_ERRORTYPE eRet = OMX_ErrorNone; OMX_BUFFERHEADERTYPE *bufHdr= NULL; // buffer header unsigned i= 0; // Temporary counter unsigned char *buf_addr = NULL; #ifdef _MSM8974_ int align_size; #endif DEBUG_PRINT_HIGH("Inside use_output_buffer()"); if (bytes != m_sOutPortDef.nBufferSize) { DEBUG_PRINT_ERROR("ERROR: use_output_buffer: Size Mismatch!! " "bytes[%u] != Port.nBufferSize[%u]", (unsigned int)bytes, (unsigned int)m_sOutPortDef.nBufferSize); return OMX_ErrorBadParameter; } if (!m_out_mem_ptr) { output_use_buffer = true; int nBufHdrSize = 0; DEBUG_PRINT_LOW("Allocating First Output Buffer(%u)",(unsigned int)m_sOutPortDef.nBufferCountActual); nBufHdrSize = m_sOutPortDef.nBufferCountActual * sizeof(OMX_BUFFERHEADERTYPE); /* * Memory for output side involves the following: * 1. Array of Buffer Headers * 2. Bitmask array to hold the buffer allocation details * In order to minimize the memory management entire allocation * is done in one step. */ m_out_mem_ptr = (OMX_BUFFERHEADERTYPE *)calloc(nBufHdrSize,1); if (m_out_mem_ptr == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_out_mem_ptr"); return OMX_ErrorInsufficientResources; } m_pOutput_pmem = (struct pmem *) calloc(sizeof (struct pmem), m_sOutPortDef.nBufferCountActual); if (m_pOutput_pmem == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_pOutput_pmem"); return OMX_ErrorInsufficientResources; } #ifdef USE_ION m_pOutput_ion = (struct venc_ion *) calloc(sizeof (struct venc_ion), m_sOutPortDef.nBufferCountActual); if (m_pOutput_ion == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_pOutput_ion"); return OMX_ErrorInsufficientResources; } #endif if (m_out_mem_ptr) { bufHdr = m_out_mem_ptr; DEBUG_PRINT_LOW("Memory Allocation Succeeded for OUT port%p",m_out_mem_ptr); for (i=0; i < m_sOutPortDef.nBufferCountActual ; i++) { bufHdr->nSize = sizeof(OMX_BUFFERHEADERTYPE); bufHdr->nVersion.nVersion = OMX_SPEC_VERSION; bufHdr->nAllocLen = bytes; bufHdr->nFilledLen = 0; bufHdr->pAppPrivate = appData; bufHdr->nOutputPortIndex = PORT_INDEX_OUT; bufHdr->pBuffer = NULL; bufHdr++; m_pOutput_pmem[i].fd = -1; #ifdef USE_ION m_pOutput_ion[i].ion_device_fd =-1; m_pOutput_ion[i].fd_ion_data.fd=-1; m_pOutput_ion[i].ion_alloc_data.handle = 0; #endif } } else { DEBUG_PRINT_ERROR("ERROR: Output buf mem alloc failed[0x%p]",m_out_mem_ptr); eRet = OMX_ErrorInsufficientResources; } } for (i=0; i< m_sOutPortDef.nBufferCountActual; i++) { if (BITMASK_ABSENT(&m_out_bm_count,i)) { break; } } if (eRet == OMX_ErrorNone) { if (i < m_sOutPortDef.nBufferCountActual) { *bufferHdr = (m_out_mem_ptr + i ); (*bufferHdr)->pBuffer = (OMX_U8 *)buffer; (*bufferHdr)->pAppPrivate = appData; if (!m_use_output_pmem) { #ifdef USE_ION #ifdef _MSM8974_ align_size = (m_sOutPortDef.nBufferSize + (SZ_4K - 1)) & ~(SZ_4K - 1); m_pOutput_ion[i].ion_device_fd = alloc_map_ion_memory(align_size, &m_pOutput_ion[i].ion_alloc_data, &m_pOutput_ion[i].fd_ion_data,0); #else m_pOutput_ion[i].ion_device_fd = alloc_map_ion_memory( m_sOutPortDef.nBufferSize, &m_pOutput_ion[i].ion_alloc_data, &m_pOutput_ion[i].fd_ion_data,ION_FLAG_CACHED); #endif if (m_pOutput_ion[i].ion_device_fd < 0) { DEBUG_PRINT_ERROR("ERROR:ION device open() Failed"); return OMX_ErrorInsufficientResources; } m_pOutput_pmem[i].fd = m_pOutput_ion[i].fd_ion_data.fd; #else 
m_pOutput_pmem[i].fd = open (MEM_DEVICE,O_RDWR); if (m_pOutput_pmem[i].fd == 0) { m_pOutput_pmem[i].fd = open (MEM_DEVICE,O_RDWR); } if (m_pOutput_pmem[i].fd < 0) { DEBUG_PRINT_ERROR("ERROR: /dev/pmem_adsp open() Failed"); return OMX_ErrorInsufficientResources; } #endif m_pOutput_pmem[i].size = m_sOutPortDef.nBufferSize; m_pOutput_pmem[i].offset = 0; m_pOutput_pmem[i].buffer = (OMX_U8 *)SECURE_BUFPTR; if(!secure_session) { #ifdef _MSM8974_ m_pOutput_pmem[i].buffer = (unsigned char *)mmap(NULL, align_size,PROT_READ|PROT_WRITE, MAP_SHARED,m_pOutput_pmem[i].fd,0); #else m_pOutput_pmem[i].buffer = (unsigned char *)mmap(NULL, m_pOutput_pmem[i].size,PROT_READ|PROT_WRITE, MAP_SHARED,m_pOutput_pmem[i].fd,0); #endif if (m_pOutput_pmem[i].buffer == MAP_FAILED) { DEBUG_PRINT_ERROR("ERROR: mmap() Failed"); close(m_pOutput_pmem[i].fd); #ifdef USE_ION free_ion_memory(&m_pOutput_ion[i]); #endif return OMX_ErrorInsufficientResources; } } } else { OMX_QCOM_PLATFORM_PRIVATE_PMEM_INFO *pParam = reinterpret_cast<OMX_QCOM_PLATFORM_PRIVATE_PMEM_INFO*>((*bufferHdr)->pAppPrivate); DEBUG_PRINT_LOW("Inside qcom_ext pParam: %p", pParam); if (pParam) { DEBUG_PRINT_LOW("Inside qcom_ext with luma:(fd:%lu,offset:0x%x)", pParam->pmem_fd, (int)pParam->offset); m_pOutput_pmem[i].fd = pParam->pmem_fd; m_pOutput_pmem[i].offset = pParam->offset; m_pOutput_pmem[i].size = m_sOutPortDef.nBufferSize; m_pOutput_pmem[i].buffer = (unsigned char *)buffer; } else { DEBUG_PRINT_ERROR("ERROR: Invalid AppData given for PMEM o/p UseBuffer case"); return OMX_ErrorBadParameter; } buf_addr = (unsigned char *)buffer; } DEBUG_PRINT_LOW("use_out:: bufhdr = %p, pBuffer = %p, m_pOutput_pmem[i].buffer = %p", (*bufferHdr), (*bufferHdr)->pBuffer, m_pOutput_pmem[i].buffer); if (dev_use_buf(&m_pOutput_pmem[i],PORT_INDEX_OUT,i) != true) { DEBUG_PRINT_ERROR("ERROR: dev_use_buf Failed for o/p buf"); return OMX_ErrorInsufficientResources; } BITMASK_SET(&m_out_bm_count,i); } else { DEBUG_PRINT_ERROR("ERROR: All o/p Buffers have been Used, invalid use_buf call for " "index = %u", i); eRet = OMX_ErrorInsufficientResources; } } return eRet; }
173,781
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void reference_32x32_dct_2d(const int16_t input[kNumCoeffs], double output[kNumCoeffs]) { for (int i = 0; i < 32; ++i) { double temp_in[32], temp_out[32]; for (int j = 0; j < 32; ++j) temp_in[j] = input[j*32 + i]; reference_32x32_dct_1d(temp_in, temp_out, 1); for (int j = 0; j < 32; ++j) output[j * 32 + i] = temp_out[j]; } for (int i = 0; i < 32; ++i) { double temp_in[32], temp_out[32]; for (int j = 0; j < 32; ++j) temp_in[j] = output[j + i*32]; reference_32x32_dct_1d(temp_in, temp_out, 1); for (int j = 0; j < 32; ++j) output[j + i * 32] = temp_out[j] / 4; } } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
void reference_32x32_dct_2d(const int16_t input[kNumCoeffs], double output[kNumCoeffs]) { for (int i = 0; i < 32; ++i) { double temp_in[32], temp_out[32]; for (int j = 0; j < 32; ++j) temp_in[j] = input[j*32 + i]; reference_32x32_dct_1d(temp_in, temp_out); for (int j = 0; j < 32; ++j) output[j * 32 + i] = temp_out[j]; } for (int i = 0; i < 32; ++i) { double temp_in[32], temp_out[32]; for (int j = 0; j < 32; ++j) temp_in[j] = output[j + i*32]; reference_32x32_dct_1d(temp_in, temp_out); for (int j = 0; j < 32; ++j) output[j + i * 32] = temp_out[j] / 4; } }
174,533
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: jbig2_parse_segment_header(Jbig2Ctx *ctx, uint8_t *buf, size_t buf_size, size_t *p_header_size) { Jbig2Segment *result; uint8_t rtscarf; uint32_t rtscarf_long; uint32_t *referred_to_segments; int referred_to_segment_count; int referred_to_segment_size; int pa_size; int offset; /* minimum possible size of a jbig2 segment header */ if (buf_size < 11) return NULL; result = jbig2_new(ctx, Jbig2Segment, 1); if (result == NULL) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "failed to allocate segment in jbig2_parse_segment_header"); return result; } /* 7.2.2 */ result->number = jbig2_get_uint32(buf); /* 7.2.3 */ result->flags = buf[4]; /* 7.2.4 referred-to segments */ rtscarf = buf[5]; if ((rtscarf & 0xe0) == 0xe0) { rtscarf_long = jbig2_get_uint32(buf + 5); referred_to_segment_count = rtscarf_long & 0x1fffffff; offset = 5 + 4 + (referred_to_segment_count + 1) / 8; } else { referred_to_segment_count = (rtscarf >> 5); offset = 5 + 1; } result->referred_to_segment_count = referred_to_segment_count; /* we now have enough information to compute the full header length */ referred_to_segment_size = result->number <= 256 ? 1 : result->number <= 65536 ? 2 : 4; /* 7.2.5 */ pa_size = result->flags & 0x40 ? 4 : 1; /* 7.2.6 */ if (offset + referred_to_segment_count * referred_to_segment_size + pa_size + 4 > buf_size) { jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, result->number, "jbig2_parse_segment_header() called with insufficient data", -1); jbig2_free(ctx->allocator, result); return NULL; } /* 7.2.5 */ if (referred_to_segment_count) { int i; referred_to_segments = jbig2_new(ctx, uint32_t, referred_to_segment_count * referred_to_segment_size); if (referred_to_segments == NULL) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "could not allocate referred_to_segments " "in jbig2_parse_segment_header"); return NULL; } for (i = 0; i < referred_to_segment_count; i++) { referred_to_segments[i] = (referred_to_segment_size == 1) ? buf[offset] : (referred_to_segment_size == 2) ? jbig2_get_uint16(buf + offset) : jbig2_get_uint32(buf + offset); offset += referred_to_segment_size; jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, result->number, "segment %d refers to segment %d", result->number, referred_to_segments[i]); } result->referred_to_segments = referred_to_segments; } else { /* no referred-to segments */ result->referred_to_segments = NULL; } /* 7.2.6 */ if (result->flags & 0x40) { result->page_association = jbig2_get_uint32(buf + offset); offset += 4; } else { result->page_association = buf[offset++]; } jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, result->number, "segment %d is associated with page %d", result->number, result->page_association); /* 7.2.7 */ result->data_length = jbig2_get_uint32(buf + offset); *p_header_size = offset + 4; /* no body parsing results yet */ result->result = NULL; return result; } Commit Message: CWE ID: CWE-119
jbig2_parse_segment_header(Jbig2Ctx *ctx, uint8_t *buf, size_t buf_size, size_t *p_header_size) { Jbig2Segment *result; uint8_t rtscarf; uint32_t rtscarf_long; uint32_t *referred_to_segments; uint32_t referred_to_segment_count; uint32_t referred_to_segment_size; uint32_t pa_size; uint32_t offset; /* minimum possible size of a jbig2 segment header */ if (buf_size < 11) return NULL; result = jbig2_new(ctx, Jbig2Segment, 1); if (result == NULL) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "failed to allocate segment in jbig2_parse_segment_header"); return result; } /* 7.2.2 */ result->number = jbig2_get_uint32(buf); /* 7.2.3 */ result->flags = buf[4]; /* 7.2.4 referred-to segments */ rtscarf = buf[5]; if ((rtscarf & 0xe0) == 0xe0) { rtscarf_long = jbig2_get_uint32(buf + 5); referred_to_segment_count = rtscarf_long & 0x1fffffff; offset = 5 + 4 + (referred_to_segment_count + 1) / 8; } else { referred_to_segment_count = (rtscarf >> 5); offset = 5 + 1; } result->referred_to_segment_count = referred_to_segment_count; /* we now have enough information to compute the full header length */ referred_to_segment_size = result->number <= 256 ? 1 : result->number <= 65536 ? 2 : 4; /* 7.2.5 */ pa_size = result->flags & 0x40 ? 4 : 1; /* 7.2.6 */ if (offset + referred_to_segment_count * referred_to_segment_size + pa_size + 4 > buf_size) { jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, result->number, "jbig2_parse_segment_header() called with insufficient data", -1); jbig2_free(ctx->allocator, result); return NULL; } /* 7.2.5 */ if (referred_to_segment_count) { uint32_t i; referred_to_segments = jbig2_new(ctx, uint32_t, referred_to_segment_count * referred_to_segment_size); if (referred_to_segments == NULL) { jbig2_error(ctx, JBIG2_SEVERITY_FATAL, -1, "could not allocate referred_to_segments " "in jbig2_parse_segment_header"); return NULL; } for (i = 0; i < referred_to_segment_count; i++) { referred_to_segments[i] = (referred_to_segment_size == 1) ? buf[offset] : (referred_to_segment_size == 2) ? jbig2_get_uint16(buf + offset) : jbig2_get_uint32(buf + offset); offset += referred_to_segment_size; jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, result->number, "segment %d refers to segment %d", result->number, referred_to_segments[i]); } result->referred_to_segments = referred_to_segments; } else { /* no referred-to segments */ result->referred_to_segments = NULL; } /* 7.2.6 */ if (result->flags & 0x40) { result->page_association = jbig2_get_uint32(buf + offset); offset += 4; } else { result->page_association = buf[offset++]; } jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, result->number, "segment %d is associated with page %d", result->number, result->page_association); /* 7.2.7 */ result->data_length = jbig2_get_uint32(buf + offset); *p_header_size = offset + 4; /* no body parsing results yet */ result->result = NULL; return result; }
165,497
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void Microtask::performCheckpoint() { v8::Isolate* isolate = v8::Isolate::GetCurrent(); V8PerIsolateData* isolateData = V8PerIsolateData::from(isolate); ASSERT(isolateData); if (isolateData->recursionLevel() || isolateData->performingMicrotaskCheckpoint() || isolateData->destructionPending() || ScriptForbiddenScope::isScriptForbidden()) return; isolateData->setPerformingMicrotaskCheckpoint(true); { V8RecursionScope recursionScope(isolate); isolate->RunMicrotasks(); } isolateData->setPerformingMicrotaskCheckpoint(false); } Commit Message: Correctly keep track of isolates for microtask execution BUG=487155 [email protected] Review URL: https://codereview.chromium.org/1161823002 git-svn-id: svn://svn.chromium.org/blink/trunk@195985 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-254
void Microtask::performCheckpoint() void Microtask::performCheckpoint(v8::Isolate* isolate) { V8PerIsolateData* isolateData = V8PerIsolateData::from(isolate); ASSERT(isolateData); if (isolateData->recursionLevel() || isolateData->performingMicrotaskCheckpoint() || isolateData->destructionPending() || ScriptForbiddenScope::isScriptForbidden()) return; isolateData->setPerformingMicrotaskCheckpoint(true); { V8RecursionScope recursionScope(isolate); isolate->RunMicrotasks(); } isolateData->setPerformingMicrotaskCheckpoint(false); }
171,945
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: int URI_FUNC(ComposeQueryEngine)(URI_CHAR * dest, const URI_TYPE(QueryList) * queryList, int maxChars, int * charsWritten, int * charsRequired, UriBool spaceToPlus, UriBool normalizeBreaks) { UriBool firstItem = URI_TRUE; int ampersandLen = 0; /* increased to 1 from second item on */ URI_CHAR * write = dest; /* Subtract terminator */ if (dest == NULL) { *charsRequired = 0; } else { maxChars--; } while (queryList != NULL) { const URI_CHAR * const key = queryList->key; const URI_CHAR * const value = queryList->value; const int worstCase = (normalizeBreaks == URI_TRUE ? 6 : 3); const int keyLen = (key == NULL) ? 0 : (int)URI_STRLEN(key); const int keyRequiredChars = worstCase * keyLen; const int valueLen = (value == NULL) ? 0 : (int)URI_STRLEN(value); const int valueRequiredChars = worstCase * valueLen; if (dest == NULL) { (*charsRequired) += ampersandLen + keyRequiredChars + ((value == NULL) ? 0 : 1 + valueRequiredChars); if (firstItem == URI_TRUE) { ampersandLen = 1; firstItem = URI_FALSE; } } else { if ((write - dest) + ampersandLen + keyRequiredChars > maxChars) { return URI_ERROR_OUTPUT_TOO_LARGE; } /* Copy key */ if (firstItem == URI_TRUE) { ampersandLen = 1; firstItem = URI_FALSE; } else { write[0] = _UT('&'); write++; } write = URI_FUNC(EscapeEx)(key, key + keyLen, write, spaceToPlus, normalizeBreaks); if (value != NULL) { if ((write - dest) + 1 + valueRequiredChars > maxChars) { return URI_ERROR_OUTPUT_TOO_LARGE; } /* Copy value */ write[0] = _UT('='); write++; write = URI_FUNC(EscapeEx)(value, value + valueLen, write, spaceToPlus, normalizeBreaks); } } queryList = queryList->next; } if (dest != NULL) { write[0] = _UT('\0'); if (charsWritten != NULL) { *charsWritten = (int)(write - dest) + 1; /* .. for terminator */ } } return URI_SUCCESS; } Commit Message: UriQuery.c: Catch integer overflow in ComposeQuery and ...Ex CWE ID: CWE-190
int URI_FUNC(ComposeQueryEngine)(URI_CHAR * dest, const URI_TYPE(QueryList) * queryList, int maxChars, int * charsWritten, int * charsRequired, UriBool spaceToPlus, UriBool normalizeBreaks) { UriBool firstItem = URI_TRUE; int ampersandLen = 0; /* increased to 1 from second item on */ URI_CHAR * write = dest; /* Subtract terminator */ if (dest == NULL) { *charsRequired = 0; } else { maxChars--; } while (queryList != NULL) { const URI_CHAR * const key = queryList->key; const URI_CHAR * const value = queryList->value; const int worstCase = (normalizeBreaks == URI_TRUE ? 6 : 3); const int keyLen = (key == NULL) ? 0 : (int)URI_STRLEN(key); int keyRequiredChars; const int valueLen = (value == NULL) ? 0 : (int)URI_STRLEN(value); int valueRequiredChars; if ((keyLen >= INT_MAX / worstCase) || (valueLen >= INT_MAX / worstCase)) { return URI_ERROR_OUTPUT_TOO_LARGE; } keyRequiredChars = worstCase * keyLen; valueRequiredChars = worstCase * valueLen; if (dest == NULL) { (*charsRequired) += ampersandLen + keyRequiredChars + ((value == NULL) ? 0 : 1 + valueRequiredChars); if (firstItem == URI_TRUE) { ampersandLen = 1; firstItem = URI_FALSE; } } else { if ((write - dest) + ampersandLen + keyRequiredChars > maxChars) { return URI_ERROR_OUTPUT_TOO_LARGE; } /* Copy key */ if (firstItem == URI_TRUE) { ampersandLen = 1; firstItem = URI_FALSE; } else { write[0] = _UT('&'); write++; } write = URI_FUNC(EscapeEx)(key, key + keyLen, write, spaceToPlus, normalizeBreaks); if (value != NULL) { if ((write - dest) + 1 + valueRequiredChars > maxChars) { return URI_ERROR_OUTPUT_TOO_LARGE; } /* Copy value */ write[0] = _UT('='); write++; write = URI_FUNC(EscapeEx)(value, value + valueLen, write, spaceToPlus, normalizeBreaks); } } queryList = queryList->next; } if (dest != NULL) { write[0] = _UT('\0'); if (charsWritten != NULL) { *charsWritten = (int)(write - dest) + 1; /* .. for terminator */ } } return URI_SUCCESS; }
168,975
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a, ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey) { EVP_MD_CTX ctx; const EVP_MD *type = NULL; unsigned char *buf_in=NULL; int ret= -1,inl; int mdnid, pknid; EVP_MD_CTX_init(&ctx); /* Convert signature OID into digest and public key OIDs */ if (type == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM); goto err; } /* Check public key OID matches public key type */ if (EVP_PKEY_type(pknid) != pkey->ameth->pkey_id) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_WRONG_PUBLIC_KEY_TYPE); goto err; } if (!EVP_VerifyInit_ex(&ctx,type, NULL)) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } inl = ASN1_item_i2d(asn, &buf_in, it); if (buf_in == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_MALLOC_FAILURE); goto err; } EVP_VerifyUpdate(&ctx,(unsigned char *)buf_in,inl); OPENSSL_cleanse(buf_in,(unsigned int)inl); OPENSSL_free(buf_in); if (EVP_VerifyFinal(&ctx,(unsigned char *)signature->data, (unsigned int)signature->length,pkey) <= 0) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } /* we don't need to zero the 'ctx' because we just checked * public information */ /* memset(&ctx,0,sizeof(ctx)); */ ret=1; err: EVP_MD_CTX_cleanup(&ctx); return(ret); } Commit Message: CWE ID: CWE-310
int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a, ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey) { EVP_MD_CTX ctx; const EVP_MD *type = NULL; unsigned char *buf_in=NULL; int ret= -1,inl; int mdnid, pknid; if (!pkey) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY, ERR_R_PASSED_NULL_PARAMETER); return -1; } EVP_MD_CTX_init(&ctx); /* Convert signature OID into digest and public key OIDs */ if (type == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM); goto err; } /* Check public key OID matches public key type */ if (EVP_PKEY_type(pknid) != pkey->ameth->pkey_id) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_WRONG_PUBLIC_KEY_TYPE); goto err; } if (!EVP_VerifyInit_ex(&ctx,type, NULL)) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } inl = ASN1_item_i2d(asn, &buf_in, it); if (buf_in == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_MALLOC_FAILURE); goto err; } EVP_VerifyUpdate(&ctx,(unsigned char *)buf_in,inl); OPENSSL_cleanse(buf_in,(unsigned int)inl); OPENSSL_free(buf_in); if (EVP_VerifyFinal(&ctx,(unsigned char *)signature->data, (unsigned int)signature->length,pkey) <= 0) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } /* we don't need to zero the 'ctx' because we just checked * public information */ /* memset(&ctx,0,sizeof(ctx)); */ ret=1; err: EVP_MD_CTX_cleanup(&ctx); return(ret); }
164,793
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: int GetFreeFrameBuffer(size_t min_size, vpx_codec_frame_buffer_t *fb) { EXPECT_TRUE(fb != NULL); const int idx = FindFreeBufferIndex(); if (idx == num_buffers_) return -1; if (ext_fb_list_[idx].size < min_size) { delete [] ext_fb_list_[idx].data; ext_fb_list_[idx].data = new uint8_t[min_size]; ext_fb_list_[idx].size = min_size; } SetFrameBuffer(idx, fb); return 0; } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
int GetFreeFrameBuffer(size_t min_size, vpx_codec_frame_buffer_t *fb) { EXPECT_TRUE(fb != NULL); const int idx = FindFreeBufferIndex(); if (idx == num_buffers_) return -1; if (ext_fb_list_[idx].size < min_size) { delete [] ext_fb_list_[idx].data; ext_fb_list_[idx].data = new uint8_t[min_size]; memset(ext_fb_list_[idx].data, 0, min_size); ext_fb_list_[idx].size = min_size; } SetFrameBuffer(idx, fb); return 0; }
174,544
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void FrameImpl::GoBack() { NOTIMPLEMENTED(); } Commit Message: [fuchsia] Implement browser tests for WebRunner Context service. Tests may interact with the WebRunner FIDL services and the underlying browser objects for end to end testing of service and browser functionality. * Add a browser test launcher main() for WebRunner. * Add some simple navigation tests. * Wire up GoBack()/GoForward() FIDL calls. * Add embedded test server resources and initialization logic. * Add missing deletion & notification calls to BrowserContext dtor. * Use FIDL events for navigation state changes. * Bug fixes: ** Move BrowserContext and Screen deletion to PostMainMessageLoopRun(), so that they may use the MessageLoop during teardown. ** Fix Frame dtor to allow for null WindowTreeHosts (headless case) ** Fix std::move logic in Frame ctor which lead to no WebContents observer being registered. Bug: 871594 Change-Id: I36bcbd2436d534d366c6be4eeb54b9f9feadd1ac Reviewed-on: https://chromium-review.googlesource.com/1164539 Commit-Queue: Kevin Marshall <[email protected]> Reviewed-by: Wez <[email protected]> Reviewed-by: Fabrice de Gans-Riberi <[email protected]> Reviewed-by: Scott Violet <[email protected]> Cr-Commit-Position: refs/heads/master@{#584155} CWE ID: CWE-264
void FrameImpl::GoBack() { if (web_contents_->GetController().CanGoBack()) web_contents_->GetController().GoBack(); }
172,153
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void CoordinatorImpl::RequestGlobalMemoryDumpAndAppendToTrace( MemoryDumpType dump_type, MemoryDumpLevelOfDetail level_of_detail, const RequestGlobalMemoryDumpAndAppendToTraceCallback& callback) { auto adapter = [](const RequestGlobalMemoryDumpAndAppendToTraceCallback& callback, bool success, uint64_t dump_guid, mojom::GlobalMemoryDumpPtr) { callback.Run(success, dump_guid); }; QueuedRequest::Args args(dump_type, level_of_detail, {}, true /* add_to_trace */, base::kNullProcessId); RequestGlobalMemoryDumpInternal(args, base::BindRepeating(adapter, callback)); } Commit Message: memory-infra: split up memory-infra coordinator service into two This allows for heap profiler to use its own service with correct capabilities and all other instances to use the existing coordinator service. Bug: 792028 Change-Id: I84e4ec71f5f1d00991c0516b1424ce7334bcd3cd Reviewed-on: https://chromium-review.googlesource.com/836896 Commit-Queue: Lalit Maganti <[email protected]> Reviewed-by: Daniel Cheng <[email protected]> Reviewed-by: oysteine <[email protected]> Reviewed-by: Albert J. Wong <[email protected]> Reviewed-by: Hector Dearman <[email protected]> Cr-Commit-Position: refs/heads/master@{#529059} CWE ID: CWE-269
void CoordinatorImpl::RequestGlobalMemoryDumpAndAppendToTrace( MemoryDumpType dump_type, MemoryDumpLevelOfDetail level_of_detail, const RequestGlobalMemoryDumpAndAppendToTraceCallback& callback) { // Don't allow arbitary processes to obtain VM regions. Only the heap profiler // is allowed to obtain them using the special method on its own dedicated // interface (HeapProfilingHelper). if (level_of_detail == MemoryDumpLevelOfDetail::VM_REGIONS_ONLY_FOR_HEAP_PROFILER) { bindings_.ReportBadMessage( "Requested global memory dump using level of detail reserved for the " "heap profiler."); return; } auto adapter = [](const RequestGlobalMemoryDumpAndAppendToTraceCallback& callback, bool success, uint64_t dump_guid, mojom::GlobalMemoryDumpPtr) { callback.Run(success, dump_guid); }; QueuedRequest::Args args(dump_type, level_of_detail, {}, true /* add_to_trace */, base::kNullProcessId); RequestGlobalMemoryDumpInternal(args, base::BindRepeating(adapter, callback)); }
172,916
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void *btif_hl_select_thread(void *arg){ fd_set org_set, curr_set; int r, max_curr_s, max_org_s; UNUSED(arg); BTIF_TRACE_DEBUG("entered btif_hl_select_thread"); FD_ZERO(&org_set); max_org_s = btif_hl_select_wakeup_init(&org_set); BTIF_TRACE_DEBUG("max_s=%d ", max_org_s); for (;;) { r = 0; BTIF_TRACE_DEBUG("set curr_set = org_set "); curr_set = org_set; max_curr_s = max_org_s; int ret = select((max_curr_s + 1), &curr_set, NULL, NULL, NULL); BTIF_TRACE_DEBUG("select unblocked ret=%d", ret); if (ret == -1) { BTIF_TRACE_DEBUG("select() ret -1, exit the thread"); btif_hl_thread_cleanup(); select_thread_id = -1; return 0; } else if (ret) { BTIF_TRACE_DEBUG("btif_hl_select_wake_signaled, signal ret=%d", ret); if (btif_hl_select_wake_signaled(&curr_set)) { r = btif_hl_select_wake_reset(); BTIF_TRACE_DEBUG("btif_hl_select_wake_signaled, signal:%d", r); if (r == btif_hl_signal_select_wakeup || r == btif_hl_signal_select_close_connected ) { btif_hl_select_wakeup_callback(&org_set, r); } else if( r == btif_hl_signal_select_exit) { btif_hl_thread_cleanup(); BTIF_TRACE_DEBUG("Exit hl_select_thread for btif_hl_signal_select_exit"); return 0; } } btif_hl_select_monitor_callback(&curr_set, &org_set); max_org_s = btif_hl_update_maxfd(max_org_s); } else BTIF_TRACE_DEBUG("no data, select ret: %d\n", ret); } BTIF_TRACE_DEBUG("leaving hl_select_thread"); return 0; } Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release CWE ID: CWE-284
static void *btif_hl_select_thread(void *arg){ fd_set org_set, curr_set; int r, max_curr_s, max_org_s; UNUSED(arg); BTIF_TRACE_DEBUG("entered btif_hl_select_thread"); FD_ZERO(&org_set); max_org_s = btif_hl_select_wakeup_init(&org_set); BTIF_TRACE_DEBUG("max_s=%d ", max_org_s); for (;;) { r = 0; BTIF_TRACE_DEBUG("set curr_set = org_set "); curr_set = org_set; max_curr_s = max_org_s; int ret = TEMP_FAILURE_RETRY(select((max_curr_s + 1), &curr_set, NULL, NULL, NULL)); BTIF_TRACE_DEBUG("select unblocked ret=%d", ret); if (ret == -1) { BTIF_TRACE_DEBUG("select() ret -1, exit the thread"); btif_hl_thread_cleanup(); select_thread_id = -1; return 0; } else if (ret) { BTIF_TRACE_DEBUG("btif_hl_select_wake_signaled, signal ret=%d", ret); if (btif_hl_select_wake_signaled(&curr_set)) { r = btif_hl_select_wake_reset(); BTIF_TRACE_DEBUG("btif_hl_select_wake_signaled, signal:%d", r); if (r == btif_hl_signal_select_wakeup || r == btif_hl_signal_select_close_connected ) { btif_hl_select_wakeup_callback(&org_set, r); } else if( r == btif_hl_signal_select_exit) { btif_hl_thread_cleanup(); BTIF_TRACE_DEBUG("Exit hl_select_thread for btif_hl_signal_select_exit"); return 0; } } btif_hl_select_monitor_callback(&curr_set, &org_set); max_org_s = btif_hl_update_maxfd(max_org_s); } else BTIF_TRACE_DEBUG("no data, select ret: %d\n", ret); } BTIF_TRACE_DEBUG("leaving hl_select_thread"); return 0; }
173,442
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void board_init_f_init_reserve(ulong base) { struct global_data *gd_ptr; /* * clear GD entirely and set it up. * Use gd_ptr, as gd may not be properly set yet. */ gd_ptr = (struct global_data *)base; /* zero the area */ memset(gd_ptr, '\0', sizeof(*gd)); /* set GD unless architecture did it already */ #if !defined(CONFIG_ARM) arch_setup_gd(gd_ptr); #endif if (CONFIG_IS_ENABLED(SYS_REPORT_STACK_F_USAGE)) board_init_f_init_stack_protection_addr(base); /* next alloc will be higher by one GD plus 16-byte alignment */ base += roundup(sizeof(struct global_data), 16); /* * record early malloc arena start. * Use gd as it is now properly set for all architectures. */ #if CONFIG_VAL(SYS_MALLOC_F_LEN) /* go down one 'early malloc arena' */ gd->malloc_base = base; /* next alloc will be higher by one 'early malloc arena' size */ base += CONFIG_VAL(SYS_MALLOC_F_LEN); #endif if (CONFIG_IS_ENABLED(SYS_REPORT_STACK_F_USAGE)) board_init_f_init_stack_protection(); } Commit Message: Merge branch '2020-01-22-master-imports' - Re-add U8500 platform support - Add bcm968360bg support - Assorted Keymile fixes - Other assorted bugfixes CWE ID: CWE-787
void board_init_f_init_reserve(ulong base) { struct global_data *gd_ptr; /* * clear GD entirely and set it up. * Use gd_ptr, as gd may not be properly set yet. */ gd_ptr = (struct global_data *)base; /* zero the area */ memset(gd_ptr, '\0', sizeof(*gd)); /* set GD unless architecture did it already */ #if !defined(CONFIG_ARM) arch_setup_gd(gd_ptr); #endif if (CONFIG_IS_ENABLED(SYS_REPORT_STACK_F_USAGE)) board_init_f_init_stack_protection_addr(base); /* next alloc will be higher by one GD plus 16-byte alignment */ base += roundup(sizeof(struct global_data), 16); /* * record early malloc arena start. * Use gd as it is now properly set for all architectures. */ #if CONFIG_VAL(SYS_MALLOC_F_LEN) /* go down one 'early malloc arena' */ gd->malloc_base = base; #endif if (CONFIG_IS_ENABLED(SYS_REPORT_STACK_F_USAGE)) board_init_f_init_stack_protection(); }
169,639
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: ext4_xattr_put_super(struct super_block *sb) { mb_cache_shrink(sb->s_bdev); } Commit Message: ext4: convert to mbcache2 The conversion is generally straightforward. The only tricky part is that xattr block corresponding to found mbcache entry can get freed before we get buffer lock for that block. So we have to check whether the entry is still valid after getting buffer lock. Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]> CWE ID: CWE-19
ext4_xattr_put_super(struct super_block *sb)
169,995
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void FolderHeaderView::SetFolderItem(AppListFolderItem* folder_item) { if (folder_item_) folder_item_->RemoveObserver(this); folder_item_ = folder_item; if (!folder_item_) return; folder_item_->AddObserver(this); folder_name_view_->SetEnabled(folder_item->folder_type() != AppListFolderItem::FOLDER_TYPE_OEM); Update(); } Commit Message: Enforce the maximum length of the folder name in UI. BUG=355797 [email protected] Review URL: https://codereview.chromium.org/203863005 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@260156 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
void FolderHeaderView::SetFolderItem(AppListFolderItem* folder_item) { if (folder_item_) folder_item_->RemoveObserver(this); folder_item_ = folder_item; if (!folder_item_) return; folder_item_->AddObserver(this); folder_name_view_->SetEnabled(folder_item_->folder_type() != AppListFolderItem::FOLDER_TYPE_OEM); Update(); }
171,201
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: mwifiex_set_wmm_params(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_ap_settings *params) { const u8 *vendor_ie; const u8 *wmm_ie; u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02}; vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, params->beacon.tail, params->beacon.tail_len); if (vendor_ie) { wmm_ie = vendor_ie; memcpy(&bss_cfg->wmm_info, wmm_ie + sizeof(struct ieee_types_header), *(wmm_ie + 1)); priv->wmm_enabled = 1; } else { memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info)); memcpy(&bss_cfg->wmm_info.oui, wmm_oui, sizeof(wmm_oui)); bss_cfg->wmm_info.subtype = MWIFIEX_WMM_SUBTYPE; bss_cfg->wmm_info.version = MWIFIEX_WMM_VERSION; priv->wmm_enabled = 0; } bss_cfg->qos_info = 0x00; return; } Commit Message: mwifiex: Fix three heap overflow at parsing element in cfg80211_ap_settings mwifiex_update_vs_ie(),mwifiex_set_uap_rates() and mwifiex_set_wmm_params() call memcpy() without checking the destination size.Since the source is given from user-space, this may trigger a heap buffer overflow. Fix them by putting the length check before performing memcpy(). This fix addresses CVE-2019-14814,CVE-2019-14815,CVE-2019-14816. Signed-off-by: Wen Huang <[email protected]> Acked-by: Ganapathi Bhat <[email protected]> Signed-off-by: Kalle Valo <[email protected]> CWE ID: CWE-120
mwifiex_set_wmm_params(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_ap_settings *params) { const u8 *vendor_ie; const u8 *wmm_ie; u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02}; vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, params->beacon.tail, params->beacon.tail_len); if (vendor_ie) { wmm_ie = vendor_ie; if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info)) return; memcpy(&bss_cfg->wmm_info, wmm_ie + sizeof(struct ieee_types_header), *(wmm_ie + 1)); priv->wmm_enabled = 1; } else { memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info)); memcpy(&bss_cfg->wmm_info.oui, wmm_oui, sizeof(wmm_oui)); bss_cfg->wmm_info.subtype = MWIFIEX_WMM_SUBTYPE; bss_cfg->wmm_info.version = MWIFIEX_WMM_VERSION; priv->wmm_enabled = 0; } bss_cfg->qos_info = 0x00; return; }
169,577
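The corrected mwifiex function copies the WMM information element only after checking its declared length against the destination structure, so a beacon tail supplied from user space can no longer overflow bss_cfg->wmm_info. A generic, self-contained sketch of the same guard (the struct layout and names are invented for illustration):

#include <stdint.h>
#include <string.h>

struct ie_dest { uint8_t body[26]; };    /* hypothetical fixed-size destination */

/* ie[0] = element id, ie[1] = payload length, ie[2..] = payload.
 * Copy only if the attacker-controlled length fits the destination. */
static int copy_ie_checked(struct ie_dest *dst, const uint8_t *ie)
{
    uint8_t len = ie[1];
    if (len > sizeof(dst->body))
        return -1;                        /* reject oversized element */
    memcpy(dst->body, ie + 2, len);
    return 0;
}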
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int decode_dds1(GetByteContext *gb, uint8_t *frame, int width, int height) { const uint8_t *frame_start = frame; const uint8_t *frame_end = frame + width * height; int mask = 0x10000, bitbuf = 0; int i, v, offset, count, segments; segments = bytestream2_get_le16(gb); while (segments--) { if (bytestream2_get_bytes_left(gb) < 2) return AVERROR_INVALIDDATA; if (mask == 0x10000) { bitbuf = bytestream2_get_le16u(gb); mask = 1; } if (bitbuf & mask) { v = bytestream2_get_le16(gb); offset = (v & 0x1FFF) << 2; count = ((v >> 13) + 2) << 1; if (frame - frame_start < offset || frame_end - frame < count*2 + width) return AVERROR_INVALIDDATA; for (i = 0; i < count; i++) { frame[0] = frame[1] = frame[width] = frame[width + 1] = frame[-offset]; frame += 2; } } else if (bitbuf & (mask << 1)) { v = bytestream2_get_le16(gb)*2; if (frame - frame_end < v) return AVERROR_INVALIDDATA; frame += v; } else { if (frame_end - frame < width + 3) return AVERROR_INVALIDDATA; frame[0] = frame[1] = frame[width] = frame[width + 1] = bytestream2_get_byte(gb); frame += 2; frame[0] = frame[1] = frame[width] = frame[width + 1] = bytestream2_get_byte(gb); frame += 2; } mask <<= 2; } return 0; } Commit Message: avcodec/dfa: Fix off by 1 error Fixes out of array access Fixes: 1345/clusterfuzz-testcase-minimized-6062963045695488 Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg Signed-off-by: Michael Niedermayer <[email protected]> CWE ID: CWE-119
static int decode_dds1(GetByteContext *gb, uint8_t *frame, int width, int height) { const uint8_t *frame_start = frame; const uint8_t *frame_end = frame + width * height; int mask = 0x10000, bitbuf = 0; int i, v, offset, count, segments; segments = bytestream2_get_le16(gb); while (segments--) { if (bytestream2_get_bytes_left(gb) < 2) return AVERROR_INVALIDDATA; if (mask == 0x10000) { bitbuf = bytestream2_get_le16u(gb); mask = 1; } if (bitbuf & mask) { v = bytestream2_get_le16(gb); offset = (v & 0x1FFF) << 2; count = ((v >> 13) + 2) << 1; if (frame - frame_start < offset || frame_end - frame < count*2 + width) return AVERROR_INVALIDDATA; for (i = 0; i < count; i++) { frame[0] = frame[1] = frame[width] = frame[width + 1] = frame[-offset]; frame += 2; } } else if (bitbuf & (mask << 1)) { v = bytestream2_get_le16(gb)*2; if (frame - frame_end < v) return AVERROR_INVALIDDATA; frame += v; } else { if (frame_end - frame < width + 4) return AVERROR_INVALIDDATA; frame[0] = frame[1] = frame[width] = frame[width + 1] = bytestream2_get_byte(gb); frame += 2; frame[0] = frame[1] = frame[width] = frame[width + 1] = bytestream2_get_byte(gb); frame += 2; } mask <<= 2; } return 0; }
168,074
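The decode_dds1 change is an off-by-one in a bounds check: the literal branch writes two 2x2 blocks, so the highest byte touched is frame[width + 3] and the code therefore needs width + 4 bytes of remaining space, one more than the old check demanded. A tiny standalone helper that spells out the arithmetic (names are illustrative; it assumes p <= end):

#include <stddef.h>
#include <stdint.h>

/* The branch writes p[0], p[1], p[width], p[width+1], advances p by 2 and
 * repeats, so relative to the original p the last index written is
 * width + 3 and the space required is width + 4 bytes. */
static int can_write_two_blocks(const uint8_t *p, const uint8_t *end, int width)
{
    size_t needed = (size_t)width + 4;    /* last index touched, plus one */
    return (size_t)(end - p) >= needed;
}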
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: image_transform_png_set_rgb_to_gray_ini(PNG_CONST image_transform *this, transform_display *that) { png_modifier *pm = that->pm; PNG_CONST color_encoding *e = pm->current_encoding; UNUSED(this) /* Since we check the encoding this flag must be set: */ pm->test_uses_encoding = 1; /* If 'e' is not NULL chromaticity information is present and either a cHRM * or an sRGB chunk will be inserted. */ if (e != 0) { /* Coefficients come from the encoding, but may need to be normalized to a * white point Y of 1.0 */ PNG_CONST double whiteY = e->red.Y + e->green.Y + e->blue.Y; data.red_coefficient = e->red.Y; data.green_coefficient = e->green.Y; data.blue_coefficient = e->blue.Y; if (whiteY != 1) { data.red_coefficient /= whiteY; data.green_coefficient /= whiteY; data.blue_coefficient /= whiteY; } } else { /* The default (built in) coeffcients, as above: */ data.red_coefficient = 6968 / 32768.; data.green_coefficient = 23434 / 32768.; data.blue_coefficient = 2366 / 32768.; } data.gamma = pm->current_gamma; /* If not set then the calculations assume linear encoding (implicitly): */ if (data.gamma == 0) data.gamma = 1; /* The arguments to png_set_rgb_to_gray can override the coefficients implied * by the color space encoding. If doing exhaustive checks do the override * in each case, otherwise do it randomly. */ if (pm->test_exhaustive) { /* First time in coefficients_overridden is 0, the following sets it to 1, * so repeat if it is set. If a test fails this may mean we subsequently * skip a non-override test, ignore that. */ data.coefficients_overridden = !data.coefficients_overridden; pm->repeat = data.coefficients_overridden != 0; } else data.coefficients_overridden = random_choice(); if (data.coefficients_overridden) { /* These values override the color encoding defaults, simply use random * numbers. */ png_uint_32 ru; double total; RANDOMIZE(ru); data.green_coefficient = total = (ru & 0xffff) / 65535.; ru >>= 16; data.red_coefficient = (1 - total) * (ru & 0xffff) / 65535.; total += data.red_coefficient; data.blue_coefficient = 1 - total; # ifdef PNG_FLOATING_POINT_SUPPORTED data.red_to_set = data.red_coefficient; data.green_to_set = data.green_coefficient; # else data.red_to_set = fix(data.red_coefficient); data.green_to_set = fix(data.green_coefficient); # endif /* The following just changes the error messages: */ pm->encoding_ignored = 1; } else { data.red_to_set = -1; data.green_to_set = -1; } /* Adjust the error limit in the png_modifier because of the larger errors * produced in the digitization during the gamma handling. */ if (data.gamma != 1) /* Use gamma tables */ { if (that->this.bit_depth == 16 || pm->assume_16_bit_calculations) { /* The computations have the form: * * r * rc + g * gc + b * bc * * Each component of which is +/-1/65535 from the gamma_to_1 table * lookup, resulting in a base error of +/-6. The gamma_from_1 * conversion adds another +/-2 in the 16-bit case and * +/-(1<<(15-PNG_MAX_GAMMA_8)) in the 8-bit case. */ that->pm->limit += pow( # if PNG_MAX_GAMMA_8 < 14 (that->this.bit_depth == 16 ? 8. : 6. + (1<<(15-PNG_MAX_GAMMA_8))) # else 8. # endif /65535, data.gamma); } else { /* Rounding to 8 bits in the linear space causes massive errors which * will trigger the error check in transform_range_check. Fix that * here by taking the gamma encoding into account. * * When DIGITIZE is set because a pre-1.7 version of libpng is being * tested allow a bigger slack. * * NOTE: this magic number was determined by experiment to be 1.1 (when * using fixed point arithmetic). 
There's no great merit to the value * below, however it only affects the limit used for checking for * internal calculation errors, not the actual limit imposed by * pngvalid on the output errors. */ that->pm->limit += pow( # if DIGITIZE 1.1 # else 1. # endif /255, data.gamma); } } else { /* With no gamma correction a large error comes from the truncation of the * calculation in the 8 bit case, allow for that here. */ if (that->this.bit_depth != 16 && !pm->assume_16_bit_calculations) that->pm->limit += 4E-3; } } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
image_transform_png_set_rgb_to_gray_ini(PNG_CONST image_transform *this, image_transform_png_set_rgb_to_gray_ini(const image_transform *this, transform_display *that) { png_modifier *pm = that->pm; const color_encoding *e = pm->current_encoding; UNUSED(this) /* Since we check the encoding this flag must be set: */ pm->test_uses_encoding = 1; /* If 'e' is not NULL chromaticity information is present and either a cHRM * or an sRGB chunk will be inserted. */ if (e != 0) { /* Coefficients come from the encoding, but may need to be normalized to a * white point Y of 1.0 */ const double whiteY = e->red.Y + e->green.Y + e->blue.Y; data.red_coefficient = e->red.Y; data.green_coefficient = e->green.Y; data.blue_coefficient = e->blue.Y; if (whiteY != 1) { data.red_coefficient /= whiteY; data.green_coefficient /= whiteY; data.blue_coefficient /= whiteY; } } else { /* The default (built in) coeffcients, as above: */ # if PNG_LIBPNG_VER < 10700 data.red_coefficient = 6968 / 32768.; data.green_coefficient = 23434 / 32768.; data.blue_coefficient = 2366 / 32768.; # else data.red_coefficient = .2126; data.green_coefficient = .7152; data.blue_coefficient = .0722; # endif } data.gamma = pm->current_gamma; /* If not set then the calculations assume linear encoding (implicitly): */ if (data.gamma == 0) data.gamma = 1; /* The arguments to png_set_rgb_to_gray can override the coefficients implied * by the color space encoding. If doing exhaustive checks do the override * in each case, otherwise do it randomly. */ if (pm->test_exhaustive) { /* First time in coefficients_overridden is 0, the following sets it to 1, * so repeat if it is set. If a test fails this may mean we subsequently * skip a non-override test, ignore that. */ data.coefficients_overridden = !data.coefficients_overridden; pm->repeat = data.coefficients_overridden != 0; } else data.coefficients_overridden = random_choice(); if (data.coefficients_overridden) { /* These values override the color encoding defaults, simply use random * numbers. */ png_uint_32 ru; double total; RANDOMIZE(ru); data.green_coefficient = total = (ru & 0xffff) / 65535.; ru >>= 16; data.red_coefficient = (1 - total) * (ru & 0xffff) / 65535.; total += data.red_coefficient; data.blue_coefficient = 1 - total; # ifdef PNG_FLOATING_POINT_SUPPORTED data.red_to_set = data.red_coefficient; data.green_to_set = data.green_coefficient; # else data.red_to_set = fix(data.red_coefficient); data.green_to_set = fix(data.green_coefficient); # endif /* The following just changes the error messages: */ pm->encoding_ignored = 1; } else { data.red_to_set = -1; data.green_to_set = -1; } /* Adjust the error limit in the png_modifier because of the larger errors * produced in the digitization during the gamma handling. */ if (data.gamma != 1) /* Use gamma tables */ { if (that->this.bit_depth == 16 || pm->assume_16_bit_calculations) { /* The computations have the form: * * r * rc + g * gc + b * bc * * Each component of which is +/-1/65535 from the gamma_to_1 table * lookup, resulting in a base error of +/-6. The gamma_from_1 * conversion adds another +/-2 in the 16-bit case and * +/-(1<<(15-PNG_MAX_GAMMA_8)) in the 8-bit case. */ # if PNG_LIBPNG_VER < 10700 if (that->this.bit_depth < 16) that->max_gamma_8 = PNG_MAX_GAMMA_8; # endif that->pm->limit += pow( (that->this.bit_depth == 16 || that->max_gamma_8 > 14 ? 8. : 6. 
+ (1<<(15-that->max_gamma_8)) )/65535, data.gamma); } else { /* Rounding to 8 bits in the linear space causes massive errors which * will trigger the error check in transform_range_check. Fix that * here by taking the gamma encoding into account. * * When DIGITIZE is set because a pre-1.7 version of libpng is being * tested allow a bigger slack. * * NOTE: this magic number was determined by experiment to be about * 1.263. There's no great merit to the value below, however it only * affects the limit used for checking for internal calculation errors, * not the actual limit imposed by pngvalid on the output errors. */ that->pm->limit += pow( # if DIGITIZE 1.3 # else 1.0 # endif /255, data.gamma); } } else { /* With no gamma correction a large error comes from the truncation of the * calculation in the 8 bit case, allow for that here. */ if (that->this.bit_depth != 16 && !pm->assume_16_bit_calculations) that->pm->limit += 4E-3; } }
173,642
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: stringprep_utf8_nfkc_normalize (const char *str, ssize_t len) { return g_utf8_normalize (str, len, G_NORMALIZE_NFKC); } Commit Message: CWE ID: CWE-125
stringprep_utf8_nfkc_normalize (const char *str, ssize_t len) { size_t n; if (len < 0) n = strlen (str); else n = len; if (u8_check ((const uint8_t *) str, n)) return NULL; return g_utf8_normalize (str, len, G_NORMALIZE_NFKC); }
164,984
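The corrected stringprep_utf8_nfkc_normalize rejects malformed input before normalizing: u8_check (from GNU libunistring) returns NULL only for well-formed UTF-8, so g_utf8_normalize never walks past a truncated sequence. A small wrapper showing the same guard; it assumes libunistring's unistr.h declares u8_check as used in the patch, and the wrapper name is illustrative:

#include <string.h>
#include <stdint.h>
#include <sys/types.h>     /* ssize_t */
#include <unistr.h>        /* u8_check(), GNU libunistring (assumed) */

/* Return 1 if str is valid UTF-8, where len < 0 means NUL-terminated. */
static int utf8_is_valid(const char *str, ssize_t len)
{
    size_t n = (len < 0) ? strlen(str) : (size_t)len;
    return u8_check((const uint8_t *)str, n) == NULL;
}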
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: long Cluster::GetEntry(long index, const mkvparser::BlockEntry*& pEntry) const { assert(m_pos >= m_element_start); pEntry = NULL; if (index < 0) return -1; //generic error if (m_entries_count < 0) return E_BUFFER_NOT_FULL; assert(m_entries); assert(m_entries_size > 0); assert(m_entries_count <= m_entries_size); if (index < m_entries_count) { pEntry = m_entries[index]; assert(pEntry); return 1; //found entry } if (m_element_size < 0) //we don't know cluster end yet return E_BUFFER_NOT_FULL; //underflow const long long element_stop = m_element_start + m_element_size; if (m_pos >= element_stop) return 0; //nothing left to parse return E_BUFFER_NOT_FULL; //underflow, since more remains to be parsed } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
long Cluster::GetEntry(long index, const mkvparser::BlockEntry*& pEntry) const if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } unsigned char flags; status = pReader->Read(pos, 1, &flags);
174,314
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static char* get_icu_value_internal( const char* loc_name , char* tag_name, int* result , int fromParseLocale) { char* tag_value = NULL; int32_t tag_value_len = 512; int singletonPos = 0; char* mod_loc_name = NULL; int grOffset = 0; int32_t buflen = 512; UErrorCode status = U_ZERO_ERROR; if( strcmp(tag_name, LOC_CANONICALIZE_TAG) != 0 ){ /* Handle grandfathered languages */ grOffset = findOffset( LOC_GRANDFATHERED , loc_name ); if( grOffset >= 0 ){ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ return estrdup(loc_name); } else { /* Since Grandfathered , no value , do nothing , retutn NULL */ return NULL; } } if( fromParseLocale==1 ){ /* Handle singletons */ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ if( strlen(loc_name)>1 && (isIDPrefix(loc_name) == 1) ){ return estrdup(loc_name); } } singletonPos = getSingletonPos( loc_name ); if( singletonPos == 0){ /* singleton at start of script, region , variant etc. * or invalid singleton at start of language */ return NULL; } else if( singletonPos > 0 ){ /* singleton at some position except at start * strip off the singleton and rest of the loc_name */ mod_loc_name = estrndup ( loc_name , singletonPos-1); } } /* end of if fromParse */ } /* end of if != LOC_CANONICAL_TAG */ if( mod_loc_name == NULL){ mod_loc_name = estrdup(loc_name ); } /* Proceed to ICU */ do{ tag_value = erealloc( tag_value , buflen ); tag_value_len = buflen; if( strcmp(tag_name , LOC_SCRIPT_TAG)==0 ){ buflen = uloc_getScript ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_LANG_TAG )==0 ){ buflen = uloc_getLanguage ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_REGION_TAG)==0 ){ buflen = uloc_getCountry ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_VARIANT_TAG)==0 ){ buflen = uloc_getVariant ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_CANONICALIZE_TAG)==0 ){ buflen = uloc_canonicalize ( mod_loc_name ,tag_value , tag_value_len , &status); } if( U_FAILURE( status ) ) { if( status == U_BUFFER_OVERFLOW_ERROR ) { status = U_ZERO_ERROR; continue; } /* Error in retriving data */ *result = 0; if( tag_value ){ efree( tag_value ); } if( mod_loc_name ){ efree( mod_loc_name); } return NULL; } } while( buflen > tag_value_len ); if( buflen ==0 ){ /* No value found */ *result = -1; if( tag_value ){ efree( tag_value ); } if( mod_loc_name ){ efree( mod_loc_name); } return NULL; } else { *result = 1; } if( mod_loc_name ){ efree( mod_loc_name); } return tag_value; } Commit Message: Fix bug #72241: get_icu_value_internal out-of-bounds read CWE ID: CWE-125
static char* get_icu_value_internal( const char* loc_name , char* tag_name, int* result , int fromParseLocale) { char* tag_value = NULL; int32_t tag_value_len = 512; int singletonPos = 0; char* mod_loc_name = NULL; int grOffset = 0; int32_t buflen = 512; UErrorCode status = U_ZERO_ERROR; if( strcmp(tag_name, LOC_CANONICALIZE_TAG) != 0 ){ /* Handle grandfathered languages */ grOffset = findOffset( LOC_GRANDFATHERED , loc_name ); if( grOffset >= 0 ){ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ return estrdup(loc_name); } else { /* Since Grandfathered , no value , do nothing , retutn NULL */ return NULL; } } if( fromParseLocale==1 ){ /* Handle singletons */ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ if( strlen(loc_name)>1 && (isIDPrefix(loc_name) == 1) ){ return estrdup(loc_name); } } singletonPos = getSingletonPos( loc_name ); if( singletonPos == 0){ /* singleton at start of script, region , variant etc. * or invalid singleton at start of language */ return NULL; } else if( singletonPos > 0 ){ /* singleton at some position except at start * strip off the singleton and rest of the loc_name */ mod_loc_name = estrndup ( loc_name , singletonPos-1); } } /* end of if fromParse */ } /* end of if != LOC_CANONICAL_TAG */ if( mod_loc_name == NULL){ mod_loc_name = estrdup(loc_name ); } /* Proceed to ICU */ do{ tag_value = erealloc( tag_value , buflen ); tag_value_len = buflen; if( strcmp(tag_name , LOC_SCRIPT_TAG)==0 ){ buflen = uloc_getScript ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_LANG_TAG )==0 ){ buflen = uloc_getLanguage ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_REGION_TAG)==0 ){ buflen = uloc_getCountry ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_VARIANT_TAG)==0 ){ buflen = uloc_getVariant ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_CANONICALIZE_TAG)==0 ){ buflen = uloc_canonicalize ( mod_loc_name ,tag_value , tag_value_len , &status); } if( U_FAILURE( status ) ) { if( status == U_BUFFER_OVERFLOW_ERROR ) { status = U_ZERO_ERROR; buflen++; /* add space for \0 */ continue; } /* Error in retriving data */ *result = 0; if( tag_value ){ efree( tag_value ); } if( mod_loc_name ){ efree( mod_loc_name); } return NULL; } } while( buflen > tag_value_len ); if( buflen ==0 ){ /* No value found */ *result = -1; if( tag_value ){ efree( tag_value ); } if( mod_loc_name ){ efree( mod_loc_name); } return NULL; } else { *result = 1; } if( mod_loc_name ){ efree( mod_loc_name); } return tag_value; }
167,205
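The intl fix adds one byte to the buffer size when ICU reports U_BUFFER_OVERFLOW_ERROR, because the length ICU returns from its preflight pass does not include the terminating NUL. The same grow-and-retry shape in portable C, with snprintf standing in for the ICU call so the example stays self-contained (the function name format_value is made up):

#include <stdio.h>
#include <stdlib.h>

/* Ask the formatter how much space it needs, then allocate that length
 * plus one byte for the terminating '\0' and format for real. */
static char *format_value(int value)
{
    int needed = snprintf(NULL, 0, "value=%d", value);   /* preflight length */
    if (needed < 0)
        return NULL;
    char *buf = malloc((size_t)needed + 1);              /* +1 for the NUL */
    if (buf == NULL)
        return NULL;
    snprintf(buf, (size_t)needed + 1, "value=%d", value);
    return buf;                                          /* caller frees */
}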
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readargs *args) { unsigned int len; int v; u32 max_blocksize = svc_max_payload(rqstp); p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); len = min(args->count, max_blocksize); /* set up the kvec */ v=0; while (len > 0) { struct page *p = *(rqstp->rq_next_page++); rqstp->rq_vec[v].iov_base = page_address(p); rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); len -= rqstp->rq_vec[v].iov_len; v++; } args->vlen = v; return xdr_argsize_check(rqstp, p); } Commit Message: Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux Pull nfsd updates from Bruce Fields: "Another RDMA update from Chuck Lever, and a bunch of miscellaneous bugfixes" * tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits) nfsd: Fix up the "supattr_exclcreat" attributes nfsd: encoders mustn't use unitialized values in error cases nfsd: fix undefined behavior in nfsd4_layout_verify lockd: fix lockd shutdown race NFSv4: Fix callback server shutdown SUNRPC: Refactor svc_set_num_threads() NFSv4.x/callback: Create the callback service through svc_create_pooled lockd: remove redundant check on block svcrdma: Clean out old XDR encoders svcrdma: Remove the req_map cache svcrdma: Remove unused RDMA Write completion handler svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt svcrdma: Clean up RPC-over-RDMA backchannel reply processing svcrdma: Report Write/Reply chunk overruns svcrdma: Clean up RDMA_ERROR path svcrdma: Use rdma_rw API in RPC reply path svcrdma: Introduce local rdma_rw API helpers svcrdma: Clean up svc_rdma_get_inv_rkey() svcrdma: Add helper to save pages under I/O svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT ... CWE ID: CWE-404
nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readargs *args) { unsigned int len; int v; u32 max_blocksize = svc_max_payload(rqstp); p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); if (!xdr_argsize_check(rqstp, p)) return 0; len = min(args->count, max_blocksize); /* set up the kvec */ v=0; while (len > 0) { struct page *p = *(rqstp->rq_next_page++); rqstp->rq_vec[v].iov_base = page_address(p); rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); len -= rqstp->rq_vec[v].iov_len; v++; } args->vlen = v; return 1; }
168,140
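The NFS decoder is fixed by validating the request with xdr_argsize_check before args->count is used to build the receive vector, i.e. the fixed-size part of the message must be fully present before any length taken from it is trusted. A generic standalone sketch of that ordering for a simple length-prefixed record (the record format and names are invented for illustration, not the XDR layout):

#include <stddef.h>
#include <stdint.h>

/* Illustrative layout: 4-byte big-endian payload length, then the payload. */
static int parse_record(const uint8_t *buf, size_t buf_len,
                        const uint8_t **payload, size_t *payload_len)
{
    if (buf_len < 4)                      /* header must be complete first */
        return -1;
    uint32_t declared = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
                        ((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];
    if (declared > buf_len - 4)           /* then the declared length must fit */
        return -1;
    *payload = buf + 4;
    *payload_len = declared;
    return 0;
}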
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: int inet_sk_rebuild_header(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0); __be32 daddr; int err; /* Route is OK, nothing to do. */ if (rt) return 0; /* Reroute. */ daddr = inet->inet_daddr; if (inet->opt && inet->opt->srr) daddr = inet->opt->faddr; rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, sk->sk_protocol, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); if (!IS_ERR(rt)) { err = 0; sk_setup_caps(sk, &rt->dst); } else { err = PTR_ERR(rt); /* Routing failed... */ sk->sk_route_caps = 0; /* * Other protocols have to map its equivalent state to TCP_SYN_SENT. * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme */ if (!sysctl_ip_dynaddr || sk->sk_state != TCP_SYN_SENT || (sk->sk_userlocks & SOCK_BINDADDR_LOCK) || (err = inet_sk_reselect_saddr(sk)) != 0) sk->sk_err_soft = -err; } return err; } Commit Message: inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <[email protected]> Cc: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-362
int inet_sk_rebuild_header(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0); __be32 daddr; struct ip_options_rcu *inet_opt; int err; /* Route is OK, nothing to do. */ if (rt) return 0; /* Reroute. */ rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); daddr = inet->inet_daddr; if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; rcu_read_unlock(); rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, sk->sk_protocol, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); if (!IS_ERR(rt)) { err = 0; sk_setup_caps(sk, &rt->dst); } else { err = PTR_ERR(rt); /* Routing failed... */ sk->sk_route_caps = 0; /* * Other protocols have to map its equivalent state to TCP_SYN_SENT. * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme */ if (!sysctl_ip_dynaddr || sk->sk_state != TCP_SYN_SENT || (sk->sk_userlocks & SOCK_BINDADDR_LOCK) || (err = inet_sk_reselect_saddr(sk)) != 0) sk->sk_err_soft = -err; } return err; }
165,543
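The inet->opt commit replaces an unsynchronized pointer with an RCU-protected one: readers take a snapshot inside rcu_read_lock()/rcu_read_unlock(), and the writer publishes a new copy, waits for readers, then frees the old one. The shape of that pattern is sketched below with the userspace liburcu library, since the kernel primitives cannot run standalone; the struct, field and function names are illustrative, the include and flavour details are assumptions, and a real reader thread must also call rcu_register_thread():

#include <stdlib.h>
#include <urcu.h>          /* liburcu (userspace RCU), default flavour assumed */

struct ip_opts { int ttl; };

static struct ip_opts *global_opts;   /* read under RCU, replaced by one updater */

/* Reader: dereference the pointer only inside a read-side critical section,
 * so the object cannot be freed out from under us. */
static int read_ttl(void)
{
    int ttl = 0;
    rcu_read_lock();
    struct ip_opts *o = rcu_dereference(global_opts);
    if (o)
        ttl = o->ttl;
    rcu_read_unlock();
    return ttl;
}

/* Single updater: publish the new options, wait for readers, free the old. */
static int set_ttl(int ttl)
{
    struct ip_opts *n = malloc(sizeof(*n));
    if (n == NULL)
        return -1;
    n->ttl = ttl;
    struct ip_opts *old = global_opts;    /* safe: only one updater assumed */
    rcu_assign_pointer(global_opts, n);
    synchronize_rcu();                    /* wait for in-flight readers */
    free(old);
    return 0;
}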
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ResourceDispatcherHostImpl::PauseRequest(int child_id, int request_id, bool pause) { GlobalRequestID global_id(child_id, request_id); PendingRequestList::iterator i = pending_requests_.find(global_id); if (i == pending_requests_.end()) { DVLOG(1) << "Pausing a request that wasn't found"; return; } ResourceRequestInfoImpl* info = ResourceRequestInfoImpl::ForRequest(i->second); int pause_count = info->pause_count() + (pause ? 1 : -1); if (pause_count < 0) { NOTREACHED(); // Unbalanced call to pause. return; } info->set_pause_count(pause_count); VLOG(1) << "To pause (" << pause << "): " << i->second->url().spec(); if (info->pause_count() == 0) { MessageLoop::current()->PostTask(FROM_HERE, base::Bind( &ResourceDispatcherHostImpl::ResumeRequest, weak_factory_.GetWeakPtr(), global_id)); } } Commit Message: Inherits SupportsWeakPtr<T> instead of having WeakPtrFactory<T> This change refines r137676. BUG=122654 TEST=browser_test Review URL: https://chromiumcodereview.appspot.com/10332233 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139771 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-119
void ResourceDispatcherHostImpl::PauseRequest(int child_id, int request_id, bool pause) { GlobalRequestID global_id(child_id, request_id); PendingRequestList::iterator i = pending_requests_.find(global_id); if (i == pending_requests_.end()) { DVLOG(1) << "Pausing a request that wasn't found"; return; } ResourceRequestInfoImpl* info = ResourceRequestInfoImpl::ForRequest(i->second); int pause_count = info->pause_count() + (pause ? 1 : -1); if (pause_count < 0) { NOTREACHED(); // Unbalanced call to pause. return; } info->set_pause_count(pause_count); VLOG(1) << "To pause (" << pause << "): " << i->second->url().spec(); if (info->pause_count() == 0) { MessageLoop::current()->PostTask(FROM_HERE, base::Bind(&ResourceDispatcherHostImpl::ResumeRequest, AsWeakPtr(), global_id)); } }
170,990
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: chrand_principal_2_svc(chrand_arg *arg, struct svc_req *rqstp) { static chrand_ret ret; krb5_keyblock *k; int nkeys; char *prime_arg, *funcname; gss_buffer_desc client_name, service_name; OM_uint32 minor_stat; kadm5_server_handle_t handle; const char *errmsg = NULL; xdr_free(xdr_chrand_ret, &ret); if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle))) goto exit_func; if ((ret.code = check_handle((void *)handle))) goto exit_func; ret.api_version = handle->api_version; funcname = "kadm5_randkey_principal"; if (setup_gss_names(rqstp, &client_name, &service_name) < 0) { ret.code = KADM5_FAILURE; goto exit_func; } if (krb5_unparse_name(handle->context, arg->princ, &prime_arg)) { ret.code = KADM5_BAD_PRINCIPAL; goto exit_func; } if (cmp_gss_krb5_name(handle, rqst2name(rqstp), arg->princ)) { ret.code = randkey_principal_wrapper_3((void *)handle, arg->princ, FALSE, 0, NULL, &k, &nkeys); } else if (!(CHANGEPW_SERVICE(rqstp)) && kadm5int_acl_check(handle->context, rqst2name(rqstp), ACL_CHANGEPW, arg->princ, NULL)) { ret.code = kadm5_randkey_principal((void *)handle, arg->princ, &k, &nkeys); } else { log_unauth(funcname, prime_arg, &client_name, &service_name, rqstp); ret.code = KADM5_AUTH_CHANGEPW; } if(ret.code == KADM5_OK) { ret.keys = k; ret.n_keys = nkeys; } if(ret.code != KADM5_AUTH_CHANGEPW) { if( ret.code != 0 ) errmsg = krb5_get_error_message(handle->context, ret.code); log_done(funcname, prime_arg, errmsg, &client_name, &service_name, rqstp); if (errmsg != NULL) krb5_free_error_message(handle->context, errmsg); } free(prime_arg); gss_release_buffer(&minor_stat, &client_name); gss_release_buffer(&minor_stat, &service_name); exit_func: free_server_handle(handle); return &ret; } Commit Message: Fix leaks in kadmin server stubs [CVE-2015-8631] In each kadmind server stub, initialize the client_name and server_name variables, and release them in the cleanup handler. Many of the stubs will otherwise leak the client and server name if krb5_unparse_name() fails. Also make sure to free the prime_arg variables in rename_principal_2_svc(), or we can leak the first one if unparsing the second one fails. Discovered by Simo Sorce. CVE-2015-8631: In all versions of MIT krb5, an authenticated attacker can cause kadmind to leak memory by supplying a null principal name in a request which uses one. Repeating these requests will eventually cause kadmind to exhaust all available memory. CVSSv2 Vector: AV:N/AC:L/Au:S/C:N/I:N/A:C/E:POC/RL:OF/RC:C ticket: 8343 (new) target_version: 1.14-next target_version: 1.13-next tags: pullup CWE ID: CWE-119
chrand_principal_2_svc(chrand_arg *arg, struct svc_req *rqstp) { static chrand_ret ret; krb5_keyblock *k; int nkeys; char *prime_arg, *funcname; gss_buffer_desc client_name = GSS_C_EMPTY_BUFFER; gss_buffer_desc service_name = GSS_C_EMPTY_BUFFER; OM_uint32 minor_stat; kadm5_server_handle_t handle; const char *errmsg = NULL; xdr_free(xdr_chrand_ret, &ret); if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle))) goto exit_func; if ((ret.code = check_handle((void *)handle))) goto exit_func; ret.api_version = handle->api_version; funcname = "kadm5_randkey_principal"; if (setup_gss_names(rqstp, &client_name, &service_name) < 0) { ret.code = KADM5_FAILURE; goto exit_func; } if (krb5_unparse_name(handle->context, arg->princ, &prime_arg)) { ret.code = KADM5_BAD_PRINCIPAL; goto exit_func; } if (cmp_gss_krb5_name(handle, rqst2name(rqstp), arg->princ)) { ret.code = randkey_principal_wrapper_3((void *)handle, arg->princ, FALSE, 0, NULL, &k, &nkeys); } else if (!(CHANGEPW_SERVICE(rqstp)) && kadm5int_acl_check(handle->context, rqst2name(rqstp), ACL_CHANGEPW, arg->princ, NULL)) { ret.code = kadm5_randkey_principal((void *)handle, arg->princ, &k, &nkeys); } else { log_unauth(funcname, prime_arg, &client_name, &service_name, rqstp); ret.code = KADM5_AUTH_CHANGEPW; } if(ret.code == KADM5_OK) { ret.keys = k; ret.n_keys = nkeys; } if(ret.code != KADM5_AUTH_CHANGEPW) { if( ret.code != 0 ) errmsg = krb5_get_error_message(handle->context, ret.code); log_done(funcname, prime_arg, errmsg, &client_name, &service_name, rqstp); if (errmsg != NULL) krb5_free_error_message(handle->context, errmsg); } free(prime_arg); exit_func: gss_release_buffer(&minor_stat, &client_name); gss_release_buffer(&minor_stat, &service_name); free_server_handle(handle); return &ret; }
167,507
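The kadmind stub is fixed by initializing client_name and service_name to empty buffers and releasing them at the common exit label, so every early return runs the same cleanup and nothing leaks. The usual C shape of that single-exit pattern, sketched with plain heap allocations instead of GSSAPI buffers (names are illustrative):

#include <stdlib.h>
#include <string.h>

/* Every resource starts in a known-empty state and is released exactly once
 * at the cleanup label, no matter where the function bails out. */
static int do_work(const char *input)
{
    char *copy = NULL;
    char *scratch = NULL;
    int ret = -1;

    if (input == NULL)
        goto cleanup;                 /* safe: both pointers are still NULL */
    copy = strdup(input);
    if (copy == NULL)
        goto cleanup;
    scratch = malloc(64);
    if (scratch == NULL)
        goto cleanup;
    /* ... use copy and scratch ... */
    ret = 0;
cleanup:
    free(scratch);                    /* free(NULL) is a harmless no-op */
    free(copy);
    return ret;
}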
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: MagickExport int LocaleLowercase(const int c) { #if defined(MAGICKCORE_LOCALE_SUPPORT) if (c_locale != (locale_t) NULL) return(tolower_l(c,c_locale)); #endif return(tolower(c)); } Commit Message: ... CWE ID: CWE-125
MagickExport int LocaleLowercase(const int c) { #if defined(MAGICKCORE_LOCALE_SUPPORT) if (c_locale != (locale_t) NULL) return(tolower_l((int) ((unsigned char) c),c_locale)); #endif return(tolower((int) ((unsigned char) c))); }
170,233
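The LocaleLowercase fix casts through unsigned char before calling tolower()/tolower_l(): passing a plain char that happens to be negative (any value other than EOF) to the <ctype.h> functions is undefined behaviour. A minimal safe wrapper (the name safe_tolower is illustrative):

#include <ctype.h>

/* Plain 'char' may be signed, so a byte such as 0xE9 would reach tolower()
 * as a negative int, which is undefined behaviour; converting to unsigned
 * char first keeps the argument in the function's valid range. */
static int safe_tolower(char c)
{
    return tolower((int)(unsigned char)c);
}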
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void SetManualFallbacks(bool enabled) { std::vector<std::string> features = { password_manager::features::kEnableManualFallbacksFilling.name, password_manager::features::kEnableManualFallbacksFillingStandalone .name, password_manager::features::kEnableManualFallbacksGeneration.name}; if (enabled) { scoped_feature_list_.InitFromCommandLine(base::JoinString(features, ","), std::string()); } else { scoped_feature_list_.InitFromCommandLine(std::string(), base::JoinString(features, ",")); } } Commit Message: Fixing names of password_manager kEnableManualFallbacksFilling feature. Fixing names of password_manager kEnableManualFallbacksFilling feature as per the naming convention. Bug: 785953 Change-Id: I4a4baa1649fe9f02c3783a5e4c40bc75e717cc03 Reviewed-on: https://chromium-review.googlesource.com/900566 Reviewed-by: Vaclav Brozek <[email protected]> Commit-Queue: NIKHIL SAHNI <[email protected]> Cr-Commit-Position: refs/heads/master@{#534923} CWE ID: CWE-264
void SetManualFallbacks(bool enabled) { std::vector<std::string> features = { password_manager::features::kManualFallbacksFilling.name, password_manager::features::kEnableManualFallbacksFillingStandalone .name, password_manager::features::kEnableManualFallbacksGeneration.name}; if (enabled) { scoped_feature_list_.InitFromCommandLine(base::JoinString(features, ","), std::string()); } else { scoped_feature_list_.InitFromCommandLine(std::string(), base::JoinString(features, ",")); } }
171,749
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ChromeMockRenderThread::OnMsgOpenChannelToExtension( int routing_id, const std::string& source_extension_id, const std::string& target_extension_id, const std::string& channel_name, int* port_id) { *port_id = 0; } Commit Message: Print preview: Use an ID instead of memory pointer string in WebUI. BUG=144051 Review URL: https://chromiumcodereview.appspot.com/10870003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@153342 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-200
void ChromeMockRenderThread::OnMsgOpenChannelToExtension( int routing_id, const std::string& source_extension_id, const std::string& target_extension_id, const std::string& channel_name, int* port_id) { *port_id = 0; }
170,852
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; unsigned int unfrag_ip6hlen, unfrag_len; struct frag_hdr *fptr; u8 *mac_start, *prevhdr; u8 nexthdr; u8 frag_hdr_sz = sizeof(struct frag_hdr); int offset; __wsum csum; mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { /* Packet is from an untrusted source, reset gso_segs. */ int type = skb_shinfo(skb)->gso_type; if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || !(type & (SKB_GSO_UDP)))) goto out; skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); segs = NULL; goto out; } /* Do software UFO. Complete and fill in the UDP checksum as HW cannot * do checksum of UDP packets sent as multiple IP fragments. */ offset = skb->csum_start - skb_headroom(skb); csum = skb_checksum(skb, offset, skb->len- offset, 0); offset += skb->csum_offset; *(__sum16 *)(skb->data + offset) = csum_fold(csum); skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ if ((skb_headroom(skb) < frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out; /* Find the unfragmentable header and shift it left by frag_hdr_sz * bytes to insert fragment header. */ unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; *prevhdr = NEXTHDR_FRAGMENT; unfrag_len = skb_network_header(skb) - skb_mac_header(skb) + unfrag_ip6hlen; mac_start = skb_mac_header(skb); memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len); skb->mac_header -= frag_hdr_sz; skb->network_header -= frag_hdr_sz; fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); fptr->nexthdr = nexthdr; fptr->reserved = 0; ipv6_select_ident(fptr); /* Fragment the skb. ipv6 header and the remaining fields of the * fragment header are updated in ipv6_gso_segment() */ segs = skb_segment(skb, features); out: return segs; } Commit Message: ipv6: udp: fix the wrong headroom check At this point, skb->data points to skb_transport_header. So, headroom check is wrong. For some case:bridge(UFO is on) + eth device(UFO is off), there is no enough headroom for IPv6 frag head. But headroom check is always false. This will bring about data be moved to there prior to skb->head, when adding IPv6 frag header to skb. Signed-off-by: Shan Wei <[email protected]> Acked-by: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-399
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; unsigned int unfrag_ip6hlen, unfrag_len; struct frag_hdr *fptr; u8 *mac_start, *prevhdr; u8 nexthdr; u8 frag_hdr_sz = sizeof(struct frag_hdr); int offset; __wsum csum; mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { /* Packet is from an untrusted source, reset gso_segs. */ int type = skb_shinfo(skb)->gso_type; if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || !(type & (SKB_GSO_UDP)))) goto out; skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); segs = NULL; goto out; } /* Do software UFO. Complete and fill in the UDP checksum as HW cannot * do checksum of UDP packets sent as multiple IP fragments. */ offset = skb->csum_start - skb_headroom(skb); csum = skb_checksum(skb, offset, skb->len- offset, 0); offset += skb->csum_offset; *(__sum16 *)(skb->data + offset) = csum_fold(csum); skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out; /* Find the unfragmentable header and shift it left by frag_hdr_sz * bytes to insert fragment header. */ unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; *prevhdr = NEXTHDR_FRAGMENT; unfrag_len = skb_network_header(skb) - skb_mac_header(skb) + unfrag_ip6hlen; mac_start = skb_mac_header(skb); memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len); skb->mac_header -= frag_hdr_sz; skb->network_header -= frag_hdr_sz; fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); fptr->nexthdr = nexthdr; fptr->reserved = 0; ipv6_select_ident(fptr); /* Fragment the skb. ipv6 header and the remaining fields of the * fragment header are updated in ipv6_gso_segment() */ segs = skb_segment(skb, features); out: return segs; }
165,681
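The udp6_ufo_fragment fix measures headroom from the true start of the buffer (skb->head up to the MAC header) instead of from skb->data, which at this point already points at the transport header, so the old test could claim there was room for the fragment header when there was not. A self-contained sketch of the same idea for a generic packet buffer: shift the existing headers left to prepend a new one only when the gap back to the allocation start is large enough (all names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Prepend hdr_len bytes in front of the current headers by shifting them
 * left; the room check must be made against the real start of the
 * allocation, not against some later pointer into the packet. */
static int prepend_header(uint8_t *buf_start, uint8_t **hdr_start,
                          size_t hdr_len, size_t existing_len)
{
    if ((size_t)(*hdr_start - buf_start) < hdr_len)
        return -1;                        /* not enough headroom */
    memmove(*hdr_start - hdr_len, *hdr_start, existing_len);
    *hdr_start -= hdr_len;
    return 0;
}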
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void IndexedDBCursor::RemoveCursorFromTransaction() { if (transaction_) transaction_->UnregisterOpenCursor(this); } Commit Message: [IndexedDB] Fix Cursor UAF If the connection is closed before we return a cursor, it dies in IndexedDBCallbacks::IOThreadHelper::SendSuccessCursor. It's deleted on the correct thread, but we also need to makes sure to remove it from its transaction. To make things simpler, we have the cursor remove itself from its transaction on destruction. R: [email protected] Bug: 728887 Change-Id: I8c76e6195c2490137a05213e47c635d12f4d3dd2 Reviewed-on: https://chromium-review.googlesource.com/526284 Commit-Queue: Daniel Murphy <[email protected]> Reviewed-by: Victor Costan <[email protected]> Cr-Commit-Position: refs/heads/master@{#477504} CWE ID: CWE-416
void IndexedDBCursor::RemoveCursorFromTransaction() {
172,307
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: DeviceOrientationController::DeviceOrientationController(Document* document) : DeviceSensorEventController(document) , DOMWindowLifecycleObserver(document->domWindow()) { } Commit Message: DevTools: remove references to modules/device_orientation from core BUG=340221 Review URL: https://codereview.chromium.org/150913003 git-svn-id: svn://svn.chromium.org/blink/trunk@166493 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID:
DeviceOrientationController::DeviceOrientationController(Document* document) : DeviceSensorEventController(document) , DOMWindowLifecycleObserver(document->domWindow()) { Page* page = document->page(); ASSERT(page); OwnPtr<DeviceOrientationInspectorAgent> deviceOrientationAgent(DeviceOrientationInspectorAgent::create(page)); InspectorController& inspectorController = page->inspectorController(); if (!inspectorController.hasAgent(deviceOrientationAgent.get()->name())) inspectorController.registerModuleAgent(deviceOrientationAgent.release()); }
171,404
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: PHP_NAMED_FUNCTION(zif_locale_set_default) { char* locale_name = NULL; int len=0; if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &locale_name ,&len ) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_set_default: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(len == 0) { locale_name = (char *)uloc_getDefault() ; len = strlen(locale_name); } zend_alter_ini_entry(LOCALE_INI_NAME, sizeof(LOCALE_INI_NAME), locale_name, len, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); RETURN_TRUE; } Commit Message: Fix bug #72241: get_icu_value_internal out-of-bounds read CWE ID: CWE-125
PHP_NAMED_FUNCTION(zif_locale_set_default) { char* locale_name = NULL; int len=0; if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &locale_name ,&len ) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_set_default: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(len == 0) { locale_name = (char *)uloc_getDefault() ; len = strlen(locale_name); } zend_alter_ini_entry(LOCALE_INI_NAME, sizeof(LOCALE_INI_NAME), locale_name, len, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); RETURN_TRUE; }
167,196
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static unsigned int unix_dgram_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk, *other; unsigned int mask, writable; sock_poll_wait(file, sk_sleep(sk), wait); mask = 0; /* exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) mask |= POLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP | POLLIN | POLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= POLLHUP; /* readable? */ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* Connection-based need to check for termination and startup */ if (sk->sk_type == SOCK_SEQPACKET) { if (sk->sk_state == TCP_CLOSE) mask |= POLLHUP; /* connection hasn't started yet? */ if (sk->sk_state == TCP_SYN_SENT) return mask; } /* No write status requested, avoid expensive OUT tests. */ if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT))) return mask; writable = unix_writable(sk); other = unix_peer_get(sk); if (other) { if (unix_peer(other) != sk) { sock_poll_wait(file, &unix_sk(other)->peer_wait, wait); if (unix_recvq_full(other)) writable = 0; } sock_put(other); } if (writable) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); return mask; } Commit Message: unix: avoid use-after-free in ep_remove_wait_queue Rainer Weikusat <[email protected]> writes: An AF_UNIX datagram socket being the client in an n:1 association with some server socket is only allowed to send messages to the server if the receive queue of this socket contains at most sk_max_ack_backlog datagrams. This implies that prospective writers might be forced to go to sleep despite none of the message presently enqueued on the server receive queue were sent by them. In order to ensure that these will be woken up once space becomes again available, the present unix_dgram_poll routine does a second sock_poll_wait call with the peer_wait wait queue of the server socket as queue argument (unix_dgram_recvmsg does a wake up on this queue after a datagram was received). This is inherently problematic because the server socket is only guaranteed to remain alive for as long as the client still holds a reference to it. In case the connection is dissolved via connect or by the dead peer detection logic in unix_dgram_sendmsg, the server socket may be freed despite "the polling mechanism" (in particular, epoll) still has a pointer to the corresponding peer_wait queue. There's no way to forcibly deregister a wait queue with epoll. Based on an idea by Jason Baron, the patch below changes the code such that a wait_queue_t belonging to the client socket is enqueued on the peer_wait queue of the server whenever the peer receive queue full condition is detected by either a sendmsg or a poll. A wake up on the peer queue is then relayed to the ordinary wait queue of the client socket via wake function. The connection to the peer wait queue is again dissolved if either a wake up is about to be relayed or the client socket reconnects or a dead peer is detected or the client socket is itself closed. This enables removing the second sock_poll_wait from unix_dgram_poll, thus avoiding the use-after-free, while still ensuring that no blocked writer sleeps forever. Signed-off-by: Rainer Weikusat <[email protected]> Fixes: ec0d215f9420 ("af_unix: fix 'poll for write'/connected DGRAM sockets") Reviewed-by: Jason Baron <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID:
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk, *other; unsigned int mask, writable; sock_poll_wait(file, sk_sleep(sk), wait); mask = 0; /* exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) mask |= POLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP | POLLIN | POLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= POLLHUP; /* readable? */ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* Connection-based need to check for termination and startup */ if (sk->sk_type == SOCK_SEQPACKET) { if (sk->sk_state == TCP_CLOSE) mask |= POLLHUP; /* connection hasn't started yet? */ if (sk->sk_state == TCP_SYN_SENT) return mask; } /* No write status requested, avoid expensive OUT tests. */ if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT))) return mask; writable = unix_writable(sk); if (writable) { unix_state_lock(sk); other = unix_peer(sk); if (other && unix_peer(other) != sk && unix_recvq_full(other) && unix_dgram_peer_wake_me(sk, other)) writable = 0; unix_state_unlock(sk); } if (writable) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); return mask; }
166,835
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: gs_nulldevice(gs_gstate * pgs) { int code = 0; if (pgs->device == 0 || !gx_device_is_null(pgs->device)) { gx_device *ndev; code = gs_copydevice(&ndev, (const gx_device *)&gs_null_device, pgs->memory); if (code < 0) return code; /* * Internal devices have a reference count of 0, not 1, * aside from references from graphics states. to sort out how the icc profile is best handled with this device. It seems to inherit properties from the current device if there is one */ rc_init(ndev, pgs->memory, 0); if (pgs->device != NULL) { if ((code = dev_proc(pgs->device, get_profile)(pgs->device, &(ndev->icc_struct))) < 0) return code; rc_increment(ndev->icc_struct); set_dev_proc(ndev, get_profile, gx_default_get_profile); } if ((code = gs_setdevice_no_erase(pgs, ndev)) < 0) if ((code = gs_setdevice_no_erase(pgs, ndev)) < 0) gs_free_object(pgs->memory, ndev, "gs_copydevice(device)"); } return code; } Commit Message: CWE ID: CWE-78
gs_nulldevice(gs_gstate * pgs) { int code = 0; bool saveLockSafety = false; if (pgs->device == 0 || !gx_device_is_null(pgs->device)) { gx_device *ndev; code = gs_copydevice(&ndev, (const gx_device *)&gs_null_device, pgs->memory); if (code < 0) return code; if (gs_currentdevice_inline(pgs) != NULL) saveLockSafety = gs_currentdevice_inline(pgs)->LockSafetyParams; /* * Internal devices have a reference count of 0, not 1, * aside from references from graphics states. to sort out how the icc profile is best handled with this device. It seems to inherit properties from the current device if there is one */ rc_init(ndev, pgs->memory, 0); if (pgs->device != NULL) { if ((code = dev_proc(pgs->device, get_profile)(pgs->device, &(ndev->icc_struct))) < 0) return code; rc_increment(ndev->icc_struct); set_dev_proc(ndev, get_profile, gx_default_get_profile); } if ((code = gs_setdevice_no_erase(pgs, ndev)) < 0) if ((code = gs_setdevice_no_erase(pgs, ndev)) < 0) gs_free_object(pgs->memory, ndev, "gs_copydevice(device)"); gs_currentdevice_inline(pgs)->LockSafetyParams = saveLockSafety; } return code; }
164,687
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: v8::Local<v8::Value> V8ValueConverterImpl::ToV8Object( v8::Isolate* isolate, v8::Local<v8::Object> creation_context, const base::DictionaryValue* val) const { v8::Local<v8::Object> result(v8::Object::New(isolate)); for (base::DictionaryValue::Iterator iter(*val); !iter.IsAtEnd(); iter.Advance()) { const std::string& key = iter.key(); v8::Local<v8::Value> child_v8 = ToV8ValueImpl(isolate, creation_context, &iter.value()); CHECK(!child_v8.IsEmpty()); v8::TryCatch try_catch(isolate); result->Set( v8::String::NewFromUtf8( isolate, key.c_str(), v8::String::kNormalString, key.length()), child_v8); if (try_catch.HasCaught()) { LOG(ERROR) << "Setter for property " << key.c_str() << " threw an " << "exception."; } } return result; } Commit Message: V8ValueConverter::ToV8Value should not trigger setters BUG=606390 Review URL: https://codereview.chromium.org/1918793003 Cr-Commit-Position: refs/heads/master@{#390045} CWE ID:
v8::Local<v8::Value> V8ValueConverterImpl::ToV8Object( v8::Isolate* isolate, v8::Local<v8::Object> creation_context, const base::DictionaryValue* val) const { v8::Local<v8::Object> result(v8::Object::New(isolate)); // TODO(robwu): Callers should pass in the context. v8::Local<v8::Context> context = isolate->GetCurrentContext(); for (base::DictionaryValue::Iterator iter(*val); !iter.IsAtEnd(); iter.Advance()) { const std::string& key = iter.key(); v8::Local<v8::Value> child_v8 = ToV8ValueImpl(isolate, creation_context, &iter.value()); CHECK(!child_v8.IsEmpty()); v8::Maybe<bool> maybe = result->CreateDataProperty( context, v8::String::NewFromUtf8(isolate, key.c_str(), v8::String::kNormalString, key.length()), child_v8); if (!maybe.IsJust() || !maybe.FromJust()) LOG(ERROR) << "Failed to set property with key " << key; } return result; }
173,284
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static long ext4_zero_range(struct file *file, loff_t offset, loff_t len, int mode) { struct inode *inode = file_inode(file); handle_t *handle = NULL; unsigned int max_blocks; loff_t new_size = 0; int ret = 0; int flags; int credits; int partial_begin, partial_end; loff_t start, end; ext4_lblk_t lblk; struct address_space *mapping = inode->i_mapping; unsigned int blkbits = inode->i_blkbits; trace_ext4_zero_range(inode, offset, len, mode); if (!S_ISREG(inode->i_mode)) return -EINVAL; /* Call ext4_force_commit to flush all data in case of data=journal. */ if (ext4_should_journal_data(inode)) { ret = ext4_force_commit(inode->i_sb); if (ret) return ret; } /* * Write out all dirty pages to avoid race conditions * Then release them. */ if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1); if (ret) return ret; } /* * Round up offset. This is not fallocate, we neet to zero out * blocks, so convert interior block aligned part of the range to * unwritten and possibly manually zero out unaligned parts of the * range. */ start = round_up(offset, 1 << blkbits); end = round_down((offset + len), 1 << blkbits); if (start < offset || end > offset + len) return -EINVAL; partial_begin = offset & ((1 << blkbits) - 1); partial_end = (offset + len) & ((1 << blkbits) - 1); lblk = start >> blkbits; max_blocks = (end >> blkbits); if (max_blocks < lblk) max_blocks = 0; else max_blocks -= lblk; flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT | EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | EXT4_EX_NOCACHE; if (mode & FALLOC_FL_KEEP_SIZE) flags |= EXT4_GET_BLOCKS_KEEP_SIZE; mutex_lock(&inode->i_mutex); /* * Indirect files do not support unwritten extnets */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { ret = -EOPNOTSUPP; goto out_mutex; } if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) { new_size = offset + len; ret = inode_newsize_ok(inode, new_size); if (ret) goto out_mutex; /* * If we have a partial block after EOF we have to allocate * the entire block. */ if (partial_end) max_blocks += 1; } if (max_blocks > 0) { /* Now release the pages and zero block aligned part of pages*/ truncate_pagecache_range(inode, start, end - 1); inode->i_mtime = inode->i_ctime = ext4_current_time(inode); /* Wait all existing dio workers, newcomers will block on i_mutex */ ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags, mode); if (ret) goto out_dio; /* * Remove entire range from the extent status tree. * * ext4_es_remove_extent(inode, lblk, max_blocks) is * NOT sufficient. I'm not sure why this is the case, * but let's be conservative and remove the extent * status tree for the entire inode. There should be * no outstanding delalloc extents thanks to the * filemap_write_and_wait_range() call above. 
*/ ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); if (ret) goto out_dio; } if (!partial_begin && !partial_end) goto out_dio; /* * In worst case we have to writeout two nonadjacent unwritten * blocks and update the inode */ credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; if (ext4_should_journal_data(inode)) credits += 2; handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); ext4_std_error(inode->i_sb, ret); goto out_dio; } inode->i_mtime = inode->i_ctime = ext4_current_time(inode); if (new_size) { ext4_update_inode_size(inode, new_size); } else { /* * Mark that we allocate beyond EOF so the subsequent truncate * can proceed even if the new size is the same as i_size. */ if ((offset + len) > i_size_read(inode)) ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); } ext4_mark_inode_dirty(handle, inode); /* Zero out partial block at the edges of the range */ ret = ext4_zero_partial_blocks(handle, inode, offset, len); if (file->f_flags & O_SYNC) ext4_handle_sync(handle); ext4_journal_stop(handle); out_dio: ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); return ret; } Commit Message: ext4: allocate entire range in zero range Currently there is a bug in zero range code which causes zero range calls to only allocate block aligned portion of the range, while ignoring the rest in some cases. In some cases, namely if the end of the range is past i_size, we do attempt to preallocate the last nonaligned block. However this might cause kernel to BUG() in some carefully designed zero range requests on setups where page size > block size. Fix this problem by first preallocating the entire range, including the nonaligned edges and converting the written extents to unwritten in the next step. This approach will also give us the advantage of having the range to be as linearly contiguous as possible. Signed-off-by: Lukas Czerner <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]> CWE ID: CWE-17
static long ext4_zero_range(struct file *file, loff_t offset, loff_t len, int mode) { struct inode *inode = file_inode(file); handle_t *handle = NULL; unsigned int max_blocks; loff_t new_size = 0; int ret = 0; int flags; int credits; int partial_begin, partial_end; loff_t start, end; ext4_lblk_t lblk; struct address_space *mapping = inode->i_mapping; unsigned int blkbits = inode->i_blkbits; trace_ext4_zero_range(inode, offset, len, mode); if (!S_ISREG(inode->i_mode)) return -EINVAL; /* Call ext4_force_commit to flush all data in case of data=journal. */ if (ext4_should_journal_data(inode)) { ret = ext4_force_commit(inode->i_sb); if (ret) return ret; } /* * Write out all dirty pages to avoid race conditions * Then release them. */ if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1); if (ret) return ret; } /* * Round up offset. This is not fallocate, we neet to zero out * blocks, so convert interior block aligned part of the range to * unwritten and possibly manually zero out unaligned parts of the * range. */ start = round_up(offset, 1 << blkbits); end = round_down((offset + len), 1 << blkbits); if (start < offset || end > offset + len) return -EINVAL; partial_begin = offset & ((1 << blkbits) - 1); partial_end = (offset + len) & ((1 << blkbits) - 1); lblk = start >> blkbits; max_blocks = (end >> blkbits); if (max_blocks < lblk) max_blocks = 0; else max_blocks -= lblk; mutex_lock(&inode->i_mutex); /* * Indirect files do not support unwritten extnets */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { ret = -EOPNOTSUPP; goto out_mutex; } if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) { new_size = offset + len; ret = inode_newsize_ok(inode, new_size); if (ret) goto out_mutex; } flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; if (mode & FALLOC_FL_KEEP_SIZE) flags |= EXT4_GET_BLOCKS_KEEP_SIZE; /* Preallocate the range including the unaligned edges */ if (partial_begin || partial_end) { ret = ext4_alloc_file_blocks(file, round_down(offset, 1 << blkbits) >> blkbits, (round_up((offset + len), 1 << blkbits) - round_down(offset, 1 << blkbits)) >> blkbits, new_size, flags, mode); if (ret) goto out_mutex; } /* Zero range excluding the unaligned edges */ if (max_blocks > 0) { flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | EXT4_EX_NOCACHE); /* Now release the pages and zero block aligned part of pages*/ truncate_pagecache_range(inode, start, end - 1); inode->i_mtime = inode->i_ctime = ext4_current_time(inode); /* Wait all existing dio workers, newcomers will block on i_mutex */ ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags, mode); if (ret) goto out_dio; /* * Remove entire range from the extent status tree. * * ext4_es_remove_extent(inode, lblk, max_blocks) is * NOT sufficient. I'm not sure why this is the case, * but let's be conservative and remove the extent * status tree for the entire inode. There should be * no outstanding delalloc extents thanks to the * filemap_write_and_wait_range() call above. 
*/ ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); if (ret) goto out_dio; } if (!partial_begin && !partial_end) goto out_dio; /* * In worst case we have to writeout two nonadjacent unwritten * blocks and update the inode */ credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; if (ext4_should_journal_data(inode)) credits += 2; handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); ext4_std_error(inode->i_sb, ret); goto out_dio; } inode->i_mtime = inode->i_ctime = ext4_current_time(inode); if (new_size) { ext4_update_inode_size(inode, new_size); } else { /* * Mark that we allocate beyond EOF so the subsequent truncate * can proceed even if the new size is the same as i_size. */ if ((offset + len) > i_size_read(inode)) ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); } ext4_mark_inode_dirty(handle, inode); /* Zero out partial block at the edges of the range */ ret = ext4_zero_partial_blocks(handle, inode, offset, len); if (file->f_flags & O_SYNC) ext4_handle_sync(handle); ext4_journal_stop(handle); out_dio: ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); return ret; }
166,729
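The ext4 fix above preallocates the whole requested range, including the partial blocks at either edge, before converting the block-aligned interior to unwritten extents. The rounding arithmetic it leans on can be sketched in isolation; round_down_p2, round_up_p2 and covering_blocks are illustrative helpers, not kernel functions, and a power-of-two block size (1 << blkbits) is assumed:

#include <stdint.h>

static inline uint64_t round_down_p2(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static inline uint64_t round_up_p2(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }

/* first block and block count covering the byte range [offset, offset + len) */
void covering_blocks(uint64_t offset, uint64_t len, unsigned blkbits,
                     uint64_t *first_blk, uint64_t *nblocks)
{
    uint64_t bs    = 1ULL << blkbits;
    uint64_t start = round_down_p2(offset, bs);       /* round the start down */
    uint64_t end   = round_up_p2(offset + len, bs);   /* round the end up */

    *first_blk = start >> blkbits;
    *nblocks   = (end - start) >> blkbits;
}

For offset 1000 and len 100 with 4096-byte blocks this yields block 0 and a count of 1, i.e. the unaligned edges are covered rather than skipped.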
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: int vrend_create_shader(struct vrend_context *ctx, uint32_t handle, const struct pipe_stream_output_info *so_info, const char *shd_text, uint32_t offlen, uint32_t num_tokens, uint32_t type, uint32_t pkt_length) { struct vrend_shader_selector *sel = NULL; int ret_handle; bool new_shader = true, long_shader = false; bool finished = false; int ret; if (type > PIPE_SHADER_GEOMETRY) return EINVAL; if (offlen & VIRGL_OBJ_SHADER_OFFSET_CONT) new_shader = false; else if (((offlen + 3) / 4) > pkt_length) long_shader = true; /* if we have an in progress one - don't allow a new shader of that type or a different handle. */ if (ctx->sub->long_shader_in_progress_handle[type]) { if (new_shader == true) return EINVAL; if (handle != ctx->sub->long_shader_in_progress_handle[type]) return EINVAL; } if (new_shader) { sel = vrend_create_shader_state(ctx, so_info, type); if (sel == NULL) return ENOMEM; if (long_shader) { sel->buf_len = ((offlen + 3) / 4) * 4; /* round up buffer size */ sel->tmp_buf = malloc(sel->buf_len); if (!sel->tmp_buf) { ret = ENOMEM; goto error; } memcpy(sel->tmp_buf, shd_text, pkt_length * 4); sel->buf_offset = pkt_length * 4; ctx->sub->long_shader_in_progress_handle[type] = handle; } else finished = true; } else { sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER); if (!sel) { fprintf(stderr, "got continuation without original shader %d\n", handle); ret = EINVAL; goto error; } offlen &= ~VIRGL_OBJ_SHADER_OFFSET_CONT; if (offlen != sel->buf_offset) { fprintf(stderr, "Got mismatched shader continuation %d vs %d\n", offlen, sel->buf_offset); ret = EINVAL; goto error; } if ((pkt_length * 4 + sel->buf_offset) > sel->buf_len) { fprintf(stderr, "Got too large shader continuation %d vs %d\n", pkt_length * 4 + sel->buf_offset, sel->buf_len); shd_text = sel->tmp_buf; } } if (finished) { struct tgsi_token *tokens; tokens = calloc(num_tokens + 10, sizeof(struct tgsi_token)); if (!tokens) { ret = ENOMEM; goto error; } if (vrend_dump_shaders) fprintf(stderr,"shader\n%s\n", shd_text); if (!tgsi_text_translate((const char *)shd_text, tokens, num_tokens + 10)) { free(tokens); ret = EINVAL; goto error; } if (vrend_finish_shader(ctx, sel, tokens)) { free(tokens); ret = EINVAL; goto error; } else { free(sel->tmp_buf); sel->tmp_buf = NULL; } free(tokens); ctx->sub->long_shader_in_progress_handle[type] = 0; } if (new_shader) { ret_handle = vrend_renderer_object_insert(ctx, sel, sizeof(*sel), handle, VIRGL_OBJECT_SHADER); if (ret_handle == 0) { ret = ENOMEM; goto error; } } return 0; error: if (new_shader) vrend_destroy_shader_selector(sel); else vrend_renderer_object_destroy(ctx, handle); return ret; } Commit Message: CWE ID: CWE-190
int vrend_create_shader(struct vrend_context *ctx, uint32_t handle, const struct pipe_stream_output_info *so_info, const char *shd_text, uint32_t offlen, uint32_t num_tokens, uint32_t type, uint32_t pkt_length) { struct vrend_shader_selector *sel = NULL; int ret_handle; bool new_shader = true, long_shader = false; bool finished = false; int ret; if (type > PIPE_SHADER_GEOMETRY) return EINVAL; if (offlen & VIRGL_OBJ_SHADER_OFFSET_CONT) new_shader = false; else if (((offlen + 3) / 4) > pkt_length) long_shader = true; /* if we have an in progress one - don't allow a new shader of that type or a different handle. */ if (ctx->sub->long_shader_in_progress_handle[type]) { if (new_shader == true) return EINVAL; if (handle != ctx->sub->long_shader_in_progress_handle[type]) return EINVAL; } if (new_shader) { sel = vrend_create_shader_state(ctx, so_info, type); if (sel == NULL) return ENOMEM; if (long_shader) { sel->buf_len = ((offlen + 3) / 4) * 4; /* round up buffer size */ sel->tmp_buf = malloc(sel->buf_len); if (!sel->tmp_buf) { ret = ENOMEM; goto error; } memcpy(sel->tmp_buf, shd_text, pkt_length * 4); sel->buf_offset = pkt_length * 4; ctx->sub->long_shader_in_progress_handle[type] = handle; } else finished = true; } else { sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER); if (!sel) { fprintf(stderr, "got continuation without original shader %d\n", handle); ret = EINVAL; goto error; } offlen &= ~VIRGL_OBJ_SHADER_OFFSET_CONT; if (offlen != sel->buf_offset) { fprintf(stderr, "Got mismatched shader continuation %d vs %d\n", offlen, sel->buf_offset); ret = EINVAL; goto error; } /*make sure no overflow */ if (pkt_length * 4 < pkt_length || pkt_length * 4 + sel->buf_offset < pkt_length * 4 || pkt_length * 4 + sel->buf_offset < sel->buf_offset) { ret = EINVAL; goto error; } if ((pkt_length * 4 + sel->buf_offset) > sel->buf_len) { fprintf(stderr, "Got too large shader continuation %d vs %d\n", pkt_length * 4 + sel->buf_offset, sel->buf_len); shd_text = sel->tmp_buf; } } if (finished) { struct tgsi_token *tokens; tokens = calloc(num_tokens + 10, sizeof(struct tgsi_token)); if (!tokens) { ret = ENOMEM; goto error; } if (vrend_dump_shaders) fprintf(stderr,"shader\n%s\n", shd_text); if (!tgsi_text_translate((const char *)shd_text, tokens, num_tokens + 10)) { free(tokens); ret = EINVAL; goto error; } if (vrend_finish_shader(ctx, sel, tokens)) { free(tokens); ret = EINVAL; goto error; } else { free(sel->tmp_buf); sel->tmp_buf = NULL; } free(tokens); ctx->sub->long_shader_in_progress_handle[type] = 0; } if (new_shader) { ret_handle = vrend_renderer_object_insert(ctx, sel, sizeof(*sel), handle, VIRGL_OBJECT_SHADER); if (ret_handle == 0) { ret = ENOMEM; goto error; } } return 0; error: if (new_shader) vrend_destroy_shader_selector(sel); else vrend_renderer_object_destroy(ctx, handle); return ret; }
164,945
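The guard added in the vrend_create_shader record above rejects continuation packets whose size arithmetic would wrap a 32-bit value before it is compared against the buffer length. A hedged sketch of the same check as a standalone helper; shader_cont_len_ok is an invented name, not part of virglrenderer:

#include <stdbool.h>
#include <stdint.h>

static bool shader_cont_len_ok(uint32_t pkt_length, uint32_t buf_offset,
                               uint32_t buf_len)
{
    uint32_t bytes;

    if (pkt_length > UINT32_MAX / 4)        /* pkt_length * 4 would overflow */
        return false;
    bytes = pkt_length * 4;
    if (bytes > UINT32_MAX - buf_offset)    /* the addition would overflow */
        return false;
    return bytes + buf_offset <= buf_len;   /* and the result must fit */
}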
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int dv_extract_audio(uint8_t* frame, uint8_t* ppcm[4], const DVprofile *sys) { int size, chan, i, j, d, of, smpls, freq, quant, half_ch; uint16_t lc, rc; const uint8_t* as_pack; uint8_t *pcm, ipcm; as_pack = dv_extract_pack(frame, dv_audio_source); if (!as_pack) /* No audio ? */ return 0; smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */ freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48kHz, 1 - 44,1kHz, 2 - 32kHz */ quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */ if (quant > 1) return -1; /* unsupported quantization */ size = (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */ half_ch = sys->difseg_size / 2; /* We work with 720p frames split in half, thus even frames have * channels 0,1 and odd 2,3. */ ipcm = (sys->height == 720 && !(frame[1] & 0x0C)) ? 2 : 0; pcm = ppcm[ipcm++]; /* for each DIF channel */ for (chan = 0; chan < sys->n_difchan; chan++) { /* for each DIF segment */ for (i = 0; i < sys->difseg_size; i++) { frame += 6 * 80; /* skip DIF segment header */ break; } /* for each AV sequence */ for (j = 0; j < 9; j++) { for (d = 8; d < 80; d += 2) { if (quant == 0) { /* 16bit quantization */ of = sys->audio_shuffle[i][j] + (d - 8) / 2 * sys->audio_stride; if (of*2 >= size) continue; pcm[of*2] = frame[d+1]; // FIXME: maybe we have to admit pcm[of*2+1] = frame[d]; // that DV is a big-endian PCM if (pcm[of*2+1] == 0x80 && pcm[of*2] == 0x00) pcm[of*2+1] = 0; } else { /* 12bit quantization */ lc = ((uint16_t)frame[d] << 4) | ((uint16_t)frame[d+2] >> 4); rc = ((uint16_t)frame[d+1] << 4) | ((uint16_t)frame[d+2] & 0x0f); lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc)); rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc)); of = sys->audio_shuffle[i%half_ch][j] + (d - 8) / 3 * sys->audio_stride; if (of*2 >= size) continue; pcm[of*2] = lc & 0xff; // FIXME: maybe we have to admit pcm[of*2+1] = lc >> 8; // that DV is a big-endian PCM of = sys->audio_shuffle[i%half_ch+half_ch][j] + (d - 8) / 3 * sys->audio_stride; pcm[of*2] = rc & 0xff; // FIXME: maybe we have to admit pcm[of*2+1] = rc >> 8; // that DV is a big-endian PCM ++d; } } frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */ } } frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */ } Commit Message: CWE ID: CWE-20
static int dv_extract_audio(uint8_t* frame, uint8_t* ppcm[4], const DVprofile *sys) { int size, chan, i, j, d, of, smpls, freq, quant, half_ch; uint16_t lc, rc; const uint8_t* as_pack; uint8_t *pcm, ipcm; as_pack = dv_extract_pack(frame, dv_audio_source); if (!as_pack) /* No audio ? */ return 0; smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */ freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48kHz, 1 - 44,1kHz, 2 - 32kHz */ quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */ if (quant > 1) return -1; /* unsupported quantization */ size = (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */ half_ch = sys->difseg_size / 2; /* We work with 720p frames split in half, thus even frames have * channels 0,1 and odd 2,3. */ ipcm = (sys->height == 720 && !(frame[1] & 0x0C)) ? 2 : 0; /* for each DIF channel */ for (chan = 0; chan < sys->n_difchan; chan++) { /* next stereo channel (50Mbps and 100Mbps only) */ pcm = ppcm[ipcm++]; if (!pcm) break; /* for each DIF segment */ for (i = 0; i < sys->difseg_size; i++) { frame += 6 * 80; /* skip DIF segment header */ break; } /* for each AV sequence */ for (j = 0; j < 9; j++) { for (d = 8; d < 80; d += 2) { if (quant == 0) { /* 16bit quantization */ of = sys->audio_shuffle[i][j] + (d - 8) / 2 * sys->audio_stride; if (of*2 >= size) continue; pcm[of*2] = frame[d+1]; // FIXME: maybe we have to admit pcm[of*2+1] = frame[d]; // that DV is a big-endian PCM if (pcm[of*2+1] == 0x80 && pcm[of*2] == 0x00) pcm[of*2+1] = 0; } else { /* 12bit quantization */ lc = ((uint16_t)frame[d] << 4) | ((uint16_t)frame[d+2] >> 4); rc = ((uint16_t)frame[d+1] << 4) | ((uint16_t)frame[d+2] & 0x0f); lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc)); rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc)); of = sys->audio_shuffle[i%half_ch][j] + (d - 8) / 3 * sys->audio_stride; if (of*2 >= size) continue; pcm[of*2] = lc & 0xff; // FIXME: maybe we have to admit pcm[of*2+1] = lc >> 8; // that DV is a big-endian PCM of = sys->audio_shuffle[i%half_ch+half_ch][j] + (d - 8) / 3 * sys->audio_stride; pcm[of*2] = rc & 0xff; // FIXME: maybe we have to admit pcm[of*2+1] = rc >> 8; // that DV is a big-endian PCM ++d; } } frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */ } } frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */ }
165,242
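The FFmpeg fix above fetches the per-channel PCM pointer inside the DIF-channel loop and stops as soon as a channel has no output buffer, instead of writing through an unchecked pointer. A small self-contained C sketch of that shape; extract_channels and the flat source layout are assumptions for illustration only:

#include <stddef.h>
#include <stdint.h>

void extract_channels(uint8_t *out[4], size_t nchan, size_t nbytes,
                      const uint8_t *src)
{
    for (size_t ch = 0; ch < nchan && ch < 4; ch++) {
        uint8_t *pcm = out[ch];

        if (!pcm)          /* no buffer supplied for this channel: stop */
            break;
        for (size_t i = 0; i < nbytes; i++)
            pcm[i] = src[ch * nbytes + i];
    }
}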
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: SProcXFixesQueryVersion(ClientPtr client) { REQUEST(xXFixesQueryVersionReq); swaps(&stuff->length); swapl(&stuff->majorVersion); return (*ProcXFixesVector[stuff->xfixesReqType]) (client); } Commit Message: CWE ID: CWE-20
SProcXFixesQueryVersion(ClientPtr client) { REQUEST(xXFixesQueryVersionReq); REQUEST_SIZE_MATCH(xXFixesQueryVersionReq); swaps(&stuff->length); swapl(&stuff->majorVersion); return (*ProcXFixesVector[stuff->xfixesReqType]) (client); }
165,444
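The one-line fix above is the usual X server pattern of validating the request length (REQUEST_SIZE_MATCH) before byte-swapping any field; the same pattern shows up in the other swapped-dispatch records in this file. A generic sketch of what the check amounts to, with invented names (query_version_req, handle_swapped_query_version) and the X convention that the length field counts 4-byte units:

#include <stddef.h>
#include <stdint.h>

struct query_version_req {
    uint8_t  req_type;
    uint8_t  sub_type;
    uint16_t length;            /* request length in 4-byte units */
    uint32_t major_version;
};

static uint16_t bswap16(uint16_t v) { return (uint16_t)((v >> 8) | (v << 8)); }
static uint32_t bswap32(uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0xff00u) | ((v << 8) & 0xff0000u) | (v << 24);
}

int handle_swapped_query_version(struct query_version_req *req)
{
    /* reject the request before touching any field it claims to carry */
    if ((size_t)bswap16(req->length) * 4 != sizeof(*req))
        return -1;              /* BadLength */
    req->length        = bswap16(req->length);
    req->major_version = bswap32(req->major_version);
    return 0;
}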
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void SparseBitSet::initFromRanges(const uint32_t* ranges, size_t nRanges) { if (nRanges == 0) { mMaxVal = 0; mIndices.reset(); mBitmaps.reset(); return; } mMaxVal = ranges[nRanges * 2 - 1]; size_t indexSize = (mMaxVal + kPageMask) >> kLogValuesPerPage; mIndices.reset(new uint32_t[indexSize]); uint32_t nPages = calcNumPages(ranges, nRanges); mBitmaps.reset(new element[nPages << (kLogValuesPerPage - kLogBitsPerEl)]); memset(mBitmaps.get(), 0, nPages << (kLogValuesPerPage - 3)); mZeroPageIndex = noZeroPage; uint32_t nonzeroPageEnd = 0; uint32_t currentPage = 0; for (size_t i = 0; i < nRanges; i++) { uint32_t start = ranges[i * 2]; uint32_t end = ranges[i * 2 + 1]; uint32_t startPage = start >> kLogValuesPerPage; uint32_t endPage = (end - 1) >> kLogValuesPerPage; if (startPage >= nonzeroPageEnd) { if (startPage > nonzeroPageEnd) { if (mZeroPageIndex == noZeroPage) { mZeroPageIndex = (currentPage++) << (kLogValuesPerPage - kLogBitsPerEl); } for (uint32_t j = nonzeroPageEnd; j < startPage; j++) { mIndices[j] = mZeroPageIndex; } } mIndices[startPage] = (currentPage++) << (kLogValuesPerPage - kLogBitsPerEl); } size_t index = ((currentPage - 1) << (kLogValuesPerPage - kLogBitsPerEl)) + ((start & kPageMask) >> kLogBitsPerEl); size_t nElements = (end - (start & ~kElMask) + kElMask) >> kLogBitsPerEl; if (nElements == 1) { mBitmaps[index] |= (kElAllOnes >> (start & kElMask)) & (kElAllOnes << ((-end) & kElMask)); } else { mBitmaps[index] |= kElAllOnes >> (start & kElMask); for (size_t j = 1; j < nElements - 1; j++) { mBitmaps[index + j] = kElAllOnes; } mBitmaps[index + nElements - 1] |= kElAllOnes << ((-end) & kElMask); } for (size_t j = startPage + 1; j < endPage + 1; j++) { mIndices[j] = (currentPage++) << (kLogValuesPerPage - kLogBitsPerEl); } nonzeroPageEnd = endPage + 1; } } Commit Message: Reject fonts with invalid ranges in cmap A corrupt or malicious font may have a negative size in its cmap range, which in turn could lead to memory corruption. This patch detects the case and rejects the font, and also includes an assertion in the sparse bit set implementation if we missed any such case. External issue: https://code.google.com/p/android/issues/detail?id=192618 Bug: 26413177 Change-Id: Icc0c80e4ef389abba0964495b89aa0fae3e9f4b2 CWE ID: CWE-20
void SparseBitSet::initFromRanges(const uint32_t* ranges, size_t nRanges) { if (nRanges == 0) { mMaxVal = 0; mIndices.reset(); mBitmaps.reset(); return; } mMaxVal = ranges[nRanges * 2 - 1]; size_t indexSize = (mMaxVal + kPageMask) >> kLogValuesPerPage; mIndices.reset(new uint32_t[indexSize]); uint32_t nPages = calcNumPages(ranges, nRanges); mBitmaps.reset(new element[nPages << (kLogValuesPerPage - kLogBitsPerEl)]); memset(mBitmaps.get(), 0, nPages << (kLogValuesPerPage - 3)); mZeroPageIndex = noZeroPage; uint32_t nonzeroPageEnd = 0; uint32_t currentPage = 0; for (size_t i = 0; i < nRanges; i++) { uint32_t start = ranges[i * 2]; uint32_t end = ranges[i * 2 + 1]; LOG_ALWAYS_FATAL_IF(end < start); // make sure range size is nonnegative uint32_t startPage = start >> kLogValuesPerPage; uint32_t endPage = (end - 1) >> kLogValuesPerPage; if (startPage >= nonzeroPageEnd) { if (startPage > nonzeroPageEnd) { if (mZeroPageIndex == noZeroPage) { mZeroPageIndex = (currentPage++) << (kLogValuesPerPage - kLogBitsPerEl); } for (uint32_t j = nonzeroPageEnd; j < startPage; j++) { mIndices[j] = mZeroPageIndex; } } mIndices[startPage] = (currentPage++) << (kLogValuesPerPage - kLogBitsPerEl); } size_t index = ((currentPage - 1) << (kLogValuesPerPage - kLogBitsPerEl)) + ((start & kPageMask) >> kLogBitsPerEl); size_t nElements = (end - (start & ~kElMask) + kElMask) >> kLogBitsPerEl; if (nElements == 1) { mBitmaps[index] |= (kElAllOnes >> (start & kElMask)) & (kElAllOnes << ((-end) & kElMask)); } else { mBitmaps[index] |= kElAllOnes >> (start & kElMask); for (size_t j = 1; j < nElements - 1; j++) { mBitmaps[index + j] = kElAllOnes; } mBitmaps[index + nElements - 1] |= kElAllOnes << ((-end) & kElMask); } for (size_t j = startPage + 1; j < endPage + 1; j++) { mIndices[j] = (currentPage++) << (kLogValuesPerPage - kLogBitsPerEl); } nonzeroPageEnd = endPage + 1; } }
174,236
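The minikin fix above rejects fonts whose cmap ranges run backwards (end < start), so the later page and element size computations can assume non-negative range lengths. A minimal validation pass in the same spirit; ranges_are_valid is an illustrative helper, whereas the actual patch aborts through LOG_ALWAYS_FATAL_IF rather than returning a flag:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool ranges_are_valid(const uint32_t *ranges, size_t n_ranges)
{
    for (size_t i = 0; i < n_ranges; i++) {
        uint32_t start = ranges[i * 2];
        uint32_t end   = ranges[i * 2 + 1];

        if (end < start)        /* negative-sized range: corrupt font */
            return false;
    }
    return true;
}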
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static char *__filterQuotedShell(const char *arg) { r_return_val_if_fail (arg, NULL); char *a = malloc (strlen (arg) + 1); if (!a) { return NULL; } char *b = a; while (*arg) { switch (*arg) { case ' ': case '=': case '\r': case '\n': break; default: *b++ = *arg; break; } arg++; } *b = 0; return a; } Commit Message: More fixes for the CVE-2019-14745 CWE ID: CWE-78
static char *__filterQuotedShell(const char *arg) { r_return_val_if_fail (arg, NULL); char *a = malloc (strlen (arg) + 1); if (!a) { return NULL; } char *b = a; while (*arg) { switch (*arg) { case ' ': case '=': case '"': case '\\': case '\r': case '\n': break; default: *b++ = *arg; break; } arg++; } *b = 0; return a; }
170,184
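The radare2 fix above extends the filter to also drop the double quote and backslash, the two characters that can terminate or escape a quoted shell argument. A usage-style sketch of the effect on a hostile argument; filter_quoted_arg is a local stand-in, not the r2 function:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *filter_quoted_arg(const char *arg)
{
    char *out = malloc(strlen(arg) + 1);
    char *b = out;

    if (!out)
        return NULL;
    for (; *arg; arg++) {
        switch (*arg) {
        case ' ': case '=': case '"': case '\\': case '\r': case '\n':
            break;              /* drop shell-sensitive characters */
        default:
            *b++ = *arg;
        }
    }
    *b = '\0';
    return out;
}

int main(void)
{
    char *safe = filter_quoted_arg("pro\"; rm -rf ~\"file");

    if (safe) {
        printf("%s\n", safe);   /* prints: pro;rm-rf~file */
        free(safe);
    }
    return 0;
}

With the quote and backslash stripped, the argument can no longer break out of the surrounding quotes when the assembled command line reaches a shell.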
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (addr_len) *addr_len = sizeof(*sin); if (flags & MSG_ERRQUEUE) { err = ip_recv_error(sk, msg, len); goto out; } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: if (err) return err; return copied; } Commit Message: inet: prevent leakage of uninitialized memory to user in recv syscalls Only update *addr_len when we actually fill in sockaddr, otherwise we can return uninitialized memory from the stack to the caller in the recvfrom, recvmmsg and recvmsg syscalls. Drop the the (addr_len == NULL) checks because we only get called with a valid addr_len pointer either from sock_common_recvmsg or inet_recvmsg. If a blocking read waits on a socket which is concurrently shut down we now return zero and set msg_msgnamelen to 0. Reported-by: mpb <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-200
static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); size_t copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (flags & MSG_ERRQUEUE) { err = ip_recv_error(sk, msg, len); goto out; } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; sin->sin_port = 0; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: if (err) return err; return copied; }
166,478
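The kernel fix above writes *addr_len only once the sockaddr has actually been filled in, so a receive path that returns without an address can no longer describe uninitialized stack bytes as valid address data. A simplified sketch of that rule as a helper; fill_sender_addr and its signature are assumptions, not the kernel API:

#include <netinet/in.h>
#include <stdint.h>
#include <string.h>

/* returns 1 and sets *addr_len only if the caller asked for the address */
int fill_sender_addr(struct sockaddr_in *sin, int *addr_len, uint32_t saddr_be)
{
    if (!sin)
        return 0;               /* no sockaddr requested: leave *addr_len alone */

    memset(sin, 0, sizeof(*sin));
    sin->sin_family      = AF_INET;
    sin->sin_addr.s_addr = saddr_be;    /* already in network byte order */
    sin->sin_port        = 0;
    *addr_len = sizeof(*sin);   /* only now is the length meaningful */
    return 1;
}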
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static char **fill_envp(struct dhcp_packet *packet) { int envc; int i; char **envp, **curr; const char *opt_name; uint8_t *temp; uint8_t overload = 0; #define BITMAP unsigned #define BBITS (sizeof(BITMAP) * 8) #define BMASK(i) (1 << (i & (sizeof(BITMAP) * 8 - 1))) #define FOUND_OPTS(i) (found_opts[(unsigned)i / BBITS]) BITMAP found_opts[256 / BBITS]; memset(found_opts, 0, sizeof(found_opts)); /* We need 6 elements for: * "interface=IFACE" * "ip=N.N.N.N" from packet->yiaddr * "siaddr=IP" from packet->siaddr_nip (unless 0) * "boot_file=FILE" from packet->file (unless overloaded) * "sname=SERVER_HOSTNAME" from packet->sname (unless overloaded) * terminating NULL */ envc = 6; /* +1 element for each option, +2 for subnet option: */ if (packet) { /* note: do not search for "pad" (0) and "end" (255) options */ for (i = 1; i < 255; i++) { temp = udhcp_get_option(packet, i); if (temp) { if (i == DHCP_OPTION_OVERLOAD) overload |= *temp; else if (i == DHCP_SUBNET) envc++; /* for $mask */ envc++; /*if (i != DHCP_MESSAGE_TYPE)*/ FOUND_OPTS(i) |= BMASK(i); } } } curr = envp = xzalloc(sizeof(envp[0]) * envc); *curr = xasprintf("interface=%s", client_config.interface); putenv(*curr++); if (!packet) return envp; /* Export BOOTP fields. Fields we don't (yet?) export: * uint8_t op; // always BOOTREPLY * uint8_t htype; // hardware address type. 1 = 10mb ethernet * uint8_t hlen; // hardware address length * uint8_t hops; // used by relay agents only * uint32_t xid; * uint16_t secs; // elapsed since client began acquisition/renewal * uint16_t flags; // only one flag so far: bcast. Never set by server * uint32_t ciaddr; // client IP (usually == yiaddr. can it be different * // if during renew server wants to give us different IP?) * uint32_t gateway_nip; // relay agent IP address * uint8_t chaddr[16]; // link-layer client hardware address (MAC) * TODO: export gateway_nip as $giaddr? 
*/ /* Most important one: yiaddr as $ip */ *curr = xmalloc(sizeof("ip=255.255.255.255")); sprint_nip(*curr, "ip=", (uint8_t *) &packet->yiaddr); putenv(*curr++); if (packet->siaddr_nip) { /* IP address of next server to use in bootstrap */ *curr = xmalloc(sizeof("siaddr=255.255.255.255")); sprint_nip(*curr, "siaddr=", (uint8_t *) &packet->siaddr_nip); putenv(*curr++); } if (!(overload & FILE_FIELD) && packet->file[0]) { /* watch out for invalid packets */ *curr = xasprintf("boot_file=%."DHCP_PKT_FILE_LEN_STR"s", packet->file); putenv(*curr++); } if (!(overload & SNAME_FIELD) && packet->sname[0]) { /* watch out for invalid packets */ *curr = xasprintf("sname=%."DHCP_PKT_SNAME_LEN_STR"s", packet->sname); putenv(*curr++); } /* Export known DHCP options */ opt_name = dhcp_option_strings; i = 0; while (*opt_name) { uint8_t code = dhcp_optflags[i].code; BITMAP *found_ptr = &FOUND_OPTS(code); BITMAP found_mask = BMASK(code); if (!(*found_ptr & found_mask)) goto next; *found_ptr &= ~found_mask; /* leave only unknown options */ temp = udhcp_get_option(packet, code); *curr = xmalloc_optname_optval(temp, &dhcp_optflags[i], opt_name); putenv(*curr++); if (code == DHCP_SUBNET) { /* Subnet option: make things like "$ip/$mask" possible */ uint32_t subnet; move_from_unaligned32(subnet, temp); *curr = xasprintf("mask=%u", mton(subnet)); putenv(*curr++); } next: opt_name += strlen(opt_name) + 1; i++; } /* Export unknown options */ for (i = 0; i < 256;) { BITMAP bitmap = FOUND_OPTS(i); if (!bitmap) { i += BBITS; continue; } if (bitmap & BMASK(i)) { unsigned len, ofs; temp = udhcp_get_option(packet, i); /* udhcp_get_option returns ptr to data portion, * need to go back to get len */ len = temp[-OPT_DATA + OPT_LEN]; *curr = xmalloc(sizeof("optNNN=") + 1 + len*2); ofs = sprintf(*curr, "opt%u=", i); *bin2hex(*curr + ofs, (void*) temp, len) = '\0'; putenv(*curr++); } i++; } return envp; } Commit Message: CWE ID: CWE-125
static char **fill_envp(struct dhcp_packet *packet) { int envc; int i; char **envp, **curr; const char *opt_name; uint8_t *temp; uint8_t overload = 0; #define BITMAP unsigned #define BBITS (sizeof(BITMAP) * 8) #define BMASK(i) (1 << (i & (sizeof(BITMAP) * 8 - 1))) #define FOUND_OPTS(i) (found_opts[(unsigned)i / BBITS]) BITMAP found_opts[256 / BBITS]; memset(found_opts, 0, sizeof(found_opts)); /* We need 6 elements for: * "interface=IFACE" * "ip=N.N.N.N" from packet->yiaddr * "siaddr=IP" from packet->siaddr_nip (unless 0) * "boot_file=FILE" from packet->file (unless overloaded) * "sname=SERVER_HOSTNAME" from packet->sname (unless overloaded) * terminating NULL */ envc = 6; /* +1 element for each option, +2 for subnet option: */ if (packet) { /* note: do not search for "pad" (0) and "end" (255) options */ for (i = 1; i < 255; i++) { temp = udhcp_get_option(packet, i); if (temp) { if (i == DHCP_OPTION_OVERLOAD) overload |= *temp; else if (i == DHCP_SUBNET) envc++; /* for $mask */ envc++; /*if (i != DHCP_MESSAGE_TYPE)*/ FOUND_OPTS(i) |= BMASK(i); } } } curr = envp = xzalloc(sizeof(envp[0]) * envc); *curr = xasprintf("interface=%s", client_config.interface); putenv(*curr++); if (!packet) return envp; /* Export BOOTP fields. Fields we don't (yet?) export: * uint8_t op; // always BOOTREPLY * uint8_t htype; // hardware address type. 1 = 10mb ethernet * uint8_t hlen; // hardware address length * uint8_t hops; // used by relay agents only * uint32_t xid; * uint16_t secs; // elapsed since client began acquisition/renewal * uint16_t flags; // only one flag so far: bcast. Never set by server * uint32_t ciaddr; // client IP (usually == yiaddr. can it be different * // if during renew server wants to give us different IP?) * uint32_t gateway_nip; // relay agent IP address * uint8_t chaddr[16]; // link-layer client hardware address (MAC) * TODO: export gateway_nip as $giaddr? 
*/ /* Most important one: yiaddr as $ip */ *curr = xmalloc(sizeof("ip=255.255.255.255")); sprint_nip(*curr, "ip=", (uint8_t *) &packet->yiaddr); putenv(*curr++); if (packet->siaddr_nip) { /* IP address of next server to use in bootstrap */ *curr = xmalloc(sizeof("siaddr=255.255.255.255")); sprint_nip(*curr, "siaddr=", (uint8_t *) &packet->siaddr_nip); putenv(*curr++); } if (!(overload & FILE_FIELD) && packet->file[0]) { /* watch out for invalid packets */ *curr = xasprintf("boot_file=%."DHCP_PKT_FILE_LEN_STR"s", packet->file); putenv(*curr++); } if (!(overload & SNAME_FIELD) && packet->sname[0]) { /* watch out for invalid packets */ *curr = xasprintf("sname=%."DHCP_PKT_SNAME_LEN_STR"s", packet->sname); putenv(*curr++); } /* Export known DHCP options */ opt_name = dhcp_option_strings; i = 0; while (*opt_name) { uint8_t code = dhcp_optflags[i].code; BITMAP *found_ptr = &FOUND_OPTS(code); BITMAP found_mask = BMASK(code); if (!(*found_ptr & found_mask)) goto next; *found_ptr &= ~found_mask; /* leave only unknown options */ temp = udhcp_get_option(packet, code); *curr = xmalloc_optname_optval(temp, &dhcp_optflags[i], opt_name); putenv(*curr++); if (code == DHCP_SUBNET && temp[-OPT_DATA + OPT_LEN] == 4) { /* Subnet option: make things like "$ip/$mask" possible */ uint32_t subnet; move_from_unaligned32(subnet, temp); *curr = xasprintf("mask=%u", mton(subnet)); putenv(*curr++); } next: opt_name += strlen(opt_name) + 1; i++; } /* Export unknown options */ for (i = 0; i < 256;) { BITMAP bitmap = FOUND_OPTS(i); if (!bitmap) { i += BBITS; continue; } if (bitmap & BMASK(i)) { unsigned len, ofs; temp = udhcp_get_option(packet, i); /* udhcp_get_option returns ptr to data portion, * need to go back to get len */ len = temp[-OPT_DATA + OPT_LEN]; *curr = xmalloc(sizeof("optNNN=") + 1 + len*2); ofs = sprintf(*curr, "opt%u=", i); *bin2hex(*curr + ofs, (void*) temp, len) = '\0'; putenv(*curr++); } i++; } return envp; }
164,943
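The busybox fix above reads the DHCP subnet option as a 32-bit value only when the option's recorded length is exactly 4 bytes, instead of trusting whatever length a malicious server declared. A standalone sketch of that length check; read_subnet_mask is an invented helper and the mask is left in network byte order:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool read_subnet_mask(const uint8_t *opt_data, uint8_t opt_len,
                             uint32_t *mask_out)
{
    if (opt_len != 4)           /* malformed option: do not read past it */
        return false;
    memcpy(mask_out, opt_data, 4);      /* unaligned-safe copy */
    return true;
}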
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: ResourceDispatcherHostImpl::ResourceDispatcherHostImpl() : download_file_manager_(new DownloadFileManager(NULL)), save_file_manager_(new SaveFileManager()), request_id_(-1), ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)), ALLOW_THIS_IN_INITIALIZER_LIST(ssl_delegate_weak_factory_(this)), is_shutdown_(false), max_outstanding_requests_cost_per_process_( kMaxOutstandingRequestsCostPerProcess), filter_(NULL), delegate_(NULL), allow_cross_origin_auth_prompt_(false) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); DCHECK(!g_resource_dispatcher_host); g_resource_dispatcher_host = this; GetContentClient()->browser()->ResourceDispatcherHostCreated(); ANNOTATE_BENIGN_RACE( &last_user_gesture_time_, "We don't care about the precise value, see http://crbug.com/92889"); BrowserThread::PostTask( BrowserThread::IO, FROM_HERE, base::Bind(&appcache::AppCacheInterceptor::EnsureRegistered)); update_load_states_timer_.reset( new base::RepeatingTimer<ResourceDispatcherHostImpl>()); } Commit Message: Inherits SupportsWeakPtr<T> instead of having WeakPtrFactory<T> This change refines r137676. BUG=122654 TEST=browser_test Review URL: https://chromiumcodereview.appspot.com/10332233 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139771 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-119
ResourceDispatcherHostImpl::ResourceDispatcherHostImpl() : download_file_manager_(new DownloadFileManager(NULL)), save_file_manager_(new SaveFileManager()), request_id_(-1), is_shutdown_(false), max_outstanding_requests_cost_per_process_( kMaxOutstandingRequestsCostPerProcess), filter_(NULL), delegate_(NULL), allow_cross_origin_auth_prompt_(false) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); DCHECK(!g_resource_dispatcher_host); g_resource_dispatcher_host = this; GetContentClient()->browser()->ResourceDispatcherHostCreated(); ANNOTATE_BENIGN_RACE( &last_user_gesture_time_, "We don't care about the precise value, see http://crbug.com/92889"); BrowserThread::PostTask( BrowserThread::IO, FROM_HERE, base::Bind(&appcache::AppCacheInterceptor::EnsureRegistered)); update_load_states_timer_.reset( new base::RepeatingTimer<ResourceDispatcherHostImpl>()); }
170,991
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: ip_optprint(netdissect_options *ndo, register const u_char *cp, u_int length) { register u_int option_len; const char *sep = ""; for (; length > 0; cp += option_len, length -= option_len) { u_int option_code; ND_PRINT((ndo, "%s", sep)); sep = ","; ND_TCHECK(*cp); option_code = *cp; ND_PRINT((ndo, "%s", tok2str(ip_option_values,"unknown %u",option_code))); if (option_code == IPOPT_NOP || option_code == IPOPT_EOL) option_len = 1; else { ND_TCHECK(cp[1]); option_len = cp[1]; if (option_len < 2) { ND_PRINT((ndo, " [bad length %u]", option_len)); return; } } if (option_len > length) { ND_PRINT((ndo, " [bad length %u]", option_len)); return; } ND_TCHECK2(*cp, option_len); switch (option_code) { case IPOPT_EOL: return; case IPOPT_TS: ip_printts(ndo, cp, option_len); break; case IPOPT_RR: /* fall through */ case IPOPT_SSRR: case IPOPT_LSRR: ip_printroute(ndo, cp, option_len); break; case IPOPT_RA: if (option_len < 4) { ND_PRINT((ndo, " [bad length %u]", option_len)); break; } ND_TCHECK(cp[3]); if (EXTRACT_16BITS(&cp[2]) != 0) ND_PRINT((ndo, " value %u", EXTRACT_16BITS(&cp[2]))); break; case IPOPT_NOP: /* nothing to print - fall through */ case IPOPT_SECURITY: default: break; } } return; trunc: ND_PRINT((ndo, "%s", tstr)); } Commit Message: CVE-2017-13022/IP: Add bounds checks to ip_printroute(). This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s), modified so the capture file won't be rejected as an invalid capture. CWE ID: CWE-125
ip_optprint(netdissect_options *ndo, register const u_char *cp, u_int length) { register u_int option_len; const char *sep = ""; for (; length > 0; cp += option_len, length -= option_len) { u_int option_code; ND_PRINT((ndo, "%s", sep)); sep = ","; ND_TCHECK(*cp); option_code = *cp; ND_PRINT((ndo, "%s", tok2str(ip_option_values,"unknown %u",option_code))); if (option_code == IPOPT_NOP || option_code == IPOPT_EOL) option_len = 1; else { ND_TCHECK(cp[1]); option_len = cp[1]; if (option_len < 2) { ND_PRINT((ndo, " [bad length %u]", option_len)); return; } } if (option_len > length) { ND_PRINT((ndo, " [bad length %u]", option_len)); return; } ND_TCHECK2(*cp, option_len); switch (option_code) { case IPOPT_EOL: return; case IPOPT_TS: ip_printts(ndo, cp, option_len); break; case IPOPT_RR: /* fall through */ case IPOPT_SSRR: case IPOPT_LSRR: if (ip_printroute(ndo, cp, option_len) == -1) goto trunc; break; case IPOPT_RA: if (option_len < 4) { ND_PRINT((ndo, " [bad length %u]", option_len)); break; } ND_TCHECK(cp[3]); if (EXTRACT_16BITS(&cp[2]) != 0) ND_PRINT((ndo, " value %u", EXTRACT_16BITS(&cp[2]))); break; case IPOPT_NOP: /* nothing to print - fall through */ case IPOPT_SECURITY: default: break; } } return; trunc: ND_PRINT((ndo, "%s", tstr)); }
167,869
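The tcpdump fix above has ip_printroute report truncation back to ip_optprint (a -1 return) so the caller can bail out through its trunc path instead of walking past the captured bytes. A rough sketch of that propagation pattern with invented names (print_route_option, print_ip_options); single-byte NOP/EOL options are ignored for brevity:

#include <stddef.h>

/* returns -1 when the option extends past the captured data */
static int print_route_option(const unsigned char *cp, unsigned opt_len,
                              size_t caplen_left)
{
    if ((size_t)opt_len > caplen_left)
        return -1;              /* truncated capture */
    (void)cp;                   /* ... decode and print opt_len bytes ... */
    return 0;
}

int print_ip_options(const unsigned char *cp, size_t len, size_t caplen_left)
{
    while (len >= 2) {
        unsigned opt_len = cp[1];

        if (opt_len < 2 || opt_len > len)
            return -1;          /* malformed length: stop */
        if (print_route_option(cp, opt_len, caplen_left) == -1)
            return -1;          /* propagate truncation to the caller */
        cp += opt_len;
        len -= opt_len;
        caplen_left -= opt_len;
    }
    return 0;
}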
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: OMX_ERRORTYPE omx_video::allocate_input_buffer( OMX_IN OMX_HANDLETYPE hComp, OMX_INOUT OMX_BUFFERHEADERTYPE** bufferHdr, OMX_IN OMX_U32 port, OMX_IN OMX_PTR appData, OMX_IN OMX_U32 bytes) { (void)hComp, (void)port; OMX_ERRORTYPE eRet = OMX_ErrorNone; unsigned i = 0; DEBUG_PRINT_HIGH("allocate_input_buffer()::"); if (bytes != m_sInPortDef.nBufferSize) { DEBUG_PRINT_ERROR("ERROR: Buffer size mismatch error: bytes[%u] != nBufferSize[%u]", (unsigned int)bytes, (unsigned int)m_sInPortDef.nBufferSize); return OMX_ErrorBadParameter; } if (!m_inp_mem_ptr) { DEBUG_PRINT_HIGH("%s: size = %u, actual cnt %u", __FUNCTION__, (unsigned int)m_sInPortDef.nBufferSize, (unsigned int)m_sInPortDef.nBufferCountActual); m_inp_mem_ptr = (OMX_BUFFERHEADERTYPE*) \ calloc( (sizeof(OMX_BUFFERHEADERTYPE)), m_sInPortDef.nBufferCountActual); if (m_inp_mem_ptr == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_inp_mem_ptr"); return OMX_ErrorInsufficientResources; } DEBUG_PRINT_LOW("Successfully allocated m_inp_mem_ptr = %p", m_inp_mem_ptr); m_pInput_pmem = (struct pmem *) calloc(sizeof (struct pmem), m_sInPortDef.nBufferCountActual); if (m_pInput_pmem == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_pInput_pmem"); return OMX_ErrorInsufficientResources; } #ifdef USE_ION m_pInput_ion = (struct venc_ion *) calloc(sizeof (struct venc_ion), m_sInPortDef.nBufferCountActual); if (m_pInput_ion == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_pInput_ion"); return OMX_ErrorInsufficientResources; } #endif for (i=0; i< m_sInPortDef.nBufferCountActual; i++) { m_pInput_pmem[i].fd = -1; #ifdef USE_ION m_pInput_ion[i].ion_device_fd =-1; m_pInput_ion[i].fd_ion_data.fd =-1; m_pInput_ion[i].ion_alloc_data.handle = 0; #endif } } for (i=0; i< m_sInPortDef.nBufferCountActual; i++) { if (BITMASK_ABSENT(&m_inp_bm_count,i)) { break; } } if (i < m_sInPortDef.nBufferCountActual) { *bufferHdr = (m_inp_mem_ptr + i); (*bufferHdr)->nSize = sizeof(OMX_BUFFERHEADERTYPE); (*bufferHdr)->nVersion.nVersion = OMX_SPEC_VERSION; (*bufferHdr)->nAllocLen = m_sInPortDef.nBufferSize; (*bufferHdr)->pAppPrivate = appData; (*bufferHdr)->nInputPortIndex = PORT_INDEX_IN; (*bufferHdr)->pInputPortPrivate = (OMX_PTR)&m_pInput_pmem[i]; #ifdef USE_ION #ifdef _MSM8974_ m_pInput_ion[i].ion_device_fd = alloc_map_ion_memory(m_sInPortDef.nBufferSize, &m_pInput_ion[i].ion_alloc_data, &m_pInput_ion[i].fd_ion_data,0); #else m_pInput_ion[i].ion_device_fd = alloc_map_ion_memory(m_sInPortDef.nBufferSize, &m_pInput_ion[i].ion_alloc_data, &m_pInput_ion[i].fd_ion_data,ION_FLAG_CACHED); #endif if (m_pInput_ion[i].ion_device_fd < 0) { DEBUG_PRINT_ERROR("ERROR:ION device open() Failed"); return OMX_ErrorInsufficientResources; } m_pInput_pmem[i].fd = m_pInput_ion[i].fd_ion_data.fd; #else m_pInput_pmem[i].fd = open (MEM_DEVICE,O_RDWR); if (m_pInput_pmem[i].fd == 0) { m_pInput_pmem[i].fd = open (MEM_DEVICE,O_RDWR); } if (m_pInput_pmem[i].fd < 0) { DEBUG_PRINT_ERROR("ERROR: /dev/pmem_adsp open() Failed"); return OMX_ErrorInsufficientResources; } #endif m_pInput_pmem[i].size = m_sInPortDef.nBufferSize; m_pInput_pmem[i].offset = 0; m_pInput_pmem[i].buffer = (OMX_U8 *)SECURE_BUFPTR; if(!secure_session) { m_pInput_pmem[i].buffer = (unsigned char *)mmap(NULL, m_pInput_pmem[i].size,PROT_READ|PROT_WRITE, MAP_SHARED,m_pInput_pmem[i].fd,0); if (m_pInput_pmem[i].buffer == MAP_FAILED) { DEBUG_PRINT_ERROR("ERROR: mmap FAILED= %d", errno); close(m_pInput_pmem[i].fd); #ifdef USE_ION free_ion_memory(&m_pInput_ion[i]); #endif return OMX_ErrorInsufficientResources; } } else { 
m_pInput_pmem[i].buffer = malloc(sizeof(OMX_U32) + sizeof(native_handle_t*)); (*bufferHdr)->nAllocLen = sizeof(OMX_U32) + sizeof(native_handle_t*); } (*bufferHdr)->pBuffer = (OMX_U8 *)m_pInput_pmem[i].buffer; DEBUG_PRINT_LOW("Virtual address in allocate buffer is %p", m_pInput_pmem[i].buffer); BITMASK_SET(&m_inp_bm_count,i); if (!mUseProxyColorFormat && (dev_use_buf(&m_pInput_pmem[i],PORT_INDEX_IN,i) != true)) { DEBUG_PRINT_ERROR("ERROR: dev_use_buf FAILED for i/p buf"); return OMX_ErrorInsufficientResources; } } else { DEBUG_PRINT_ERROR("ERROR: All i/p buffers are allocated, invalid allocate buf call" "for index [%d]", i); eRet = OMX_ErrorInsufficientResources; } return eRet; } Commit Message: DO NOT MERGE mm-video-v4l2: venc: add checks before accessing heap pointers Heap pointers do not point to user virtual addresses in case of secure session. Set them to NULL and add checks to avoid accesing them Bug: 28815329 Bug: 28920116 Change-Id: I94fd5808e753b58654d65e175d3857ef46ffba26 CWE ID: CWE-200
OMX_ERRORTYPE omx_video::allocate_input_buffer( OMX_IN OMX_HANDLETYPE hComp, OMX_INOUT OMX_BUFFERHEADERTYPE** bufferHdr, OMX_IN OMX_U32 port, OMX_IN OMX_PTR appData, OMX_IN OMX_U32 bytes) { (void)hComp, (void)port; OMX_ERRORTYPE eRet = OMX_ErrorNone; unsigned i = 0; DEBUG_PRINT_HIGH("allocate_input_buffer()::"); if (bytes != m_sInPortDef.nBufferSize) { DEBUG_PRINT_ERROR("ERROR: Buffer size mismatch error: bytes[%u] != nBufferSize[%u]", (unsigned int)bytes, (unsigned int)m_sInPortDef.nBufferSize); return OMX_ErrorBadParameter; } if (!m_inp_mem_ptr) { DEBUG_PRINT_HIGH("%s: size = %u, actual cnt %u", __FUNCTION__, (unsigned int)m_sInPortDef.nBufferSize, (unsigned int)m_sInPortDef.nBufferCountActual); m_inp_mem_ptr = (OMX_BUFFERHEADERTYPE*) \ calloc( (sizeof(OMX_BUFFERHEADERTYPE)), m_sInPortDef.nBufferCountActual); if (m_inp_mem_ptr == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_inp_mem_ptr"); return OMX_ErrorInsufficientResources; } DEBUG_PRINT_LOW("Successfully allocated m_inp_mem_ptr = %p", m_inp_mem_ptr); m_pInput_pmem = (struct pmem *) calloc(sizeof (struct pmem), m_sInPortDef.nBufferCountActual); if (m_pInput_pmem == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_pInput_pmem"); return OMX_ErrorInsufficientResources; } #ifdef USE_ION m_pInput_ion = (struct venc_ion *) calloc(sizeof (struct venc_ion), m_sInPortDef.nBufferCountActual); if (m_pInput_ion == NULL) { DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_pInput_ion"); return OMX_ErrorInsufficientResources; } #endif for (i=0; i< m_sInPortDef.nBufferCountActual; i++) { m_pInput_pmem[i].fd = -1; #ifdef USE_ION m_pInput_ion[i].ion_device_fd =-1; m_pInput_ion[i].fd_ion_data.fd =-1; m_pInput_ion[i].ion_alloc_data.handle = 0; #endif } } for (i=0; i< m_sInPortDef.nBufferCountActual; i++) { if (BITMASK_ABSENT(&m_inp_bm_count,i)) { break; } } if (i < m_sInPortDef.nBufferCountActual) { *bufferHdr = (m_inp_mem_ptr + i); (*bufferHdr)->nSize = sizeof(OMX_BUFFERHEADERTYPE); (*bufferHdr)->nVersion.nVersion = OMX_SPEC_VERSION; (*bufferHdr)->nAllocLen = m_sInPortDef.nBufferSize; (*bufferHdr)->pAppPrivate = appData; (*bufferHdr)->nInputPortIndex = PORT_INDEX_IN; (*bufferHdr)->pInputPortPrivate = (OMX_PTR)&m_pInput_pmem[i]; #ifdef USE_ION #ifdef _MSM8974_ m_pInput_ion[i].ion_device_fd = alloc_map_ion_memory(m_sInPortDef.nBufferSize, &m_pInput_ion[i].ion_alloc_data, &m_pInput_ion[i].fd_ion_data,0); #else m_pInput_ion[i].ion_device_fd = alloc_map_ion_memory(m_sInPortDef.nBufferSize, &m_pInput_ion[i].ion_alloc_data, &m_pInput_ion[i].fd_ion_data,ION_FLAG_CACHED); #endif if (m_pInput_ion[i].ion_device_fd < 0) { DEBUG_PRINT_ERROR("ERROR:ION device open() Failed"); return OMX_ErrorInsufficientResources; } m_pInput_pmem[i].fd = m_pInput_ion[i].fd_ion_data.fd; #else m_pInput_pmem[i].fd = open (MEM_DEVICE,O_RDWR); if (m_pInput_pmem[i].fd == 0) { m_pInput_pmem[i].fd = open (MEM_DEVICE,O_RDWR); } if (m_pInput_pmem[i].fd < 0) { DEBUG_PRINT_ERROR("ERROR: /dev/pmem_adsp open() Failed"); return OMX_ErrorInsufficientResources; } #endif m_pInput_pmem[i].size = m_sInPortDef.nBufferSize; m_pInput_pmem[i].offset = 0; m_pInput_pmem[i].buffer = NULL; if(!secure_session) { m_pInput_pmem[i].buffer = (unsigned char *)mmap(NULL, m_pInput_pmem[i].size,PROT_READ|PROT_WRITE, MAP_SHARED,m_pInput_pmem[i].fd,0); if (m_pInput_pmem[i].buffer == MAP_FAILED) { DEBUG_PRINT_ERROR("ERROR: mmap FAILED= %d", errno); m_pInput_pmem[i].buffer = NULL; close(m_pInput_pmem[i].fd); #ifdef USE_ION free_ion_memory(&m_pInput_ion[i]); #endif return OMX_ErrorInsufficientResources; } } 
else { m_pInput_pmem[i].buffer = malloc(sizeof(OMX_U32) + sizeof(native_handle_t*)); if (m_pInput_pmem[i].buffer == NULL) { DEBUG_PRINT_ERROR("%s: failed to allocate native-handle", __func__); return OMX_ErrorInsufficientResources; } (*bufferHdr)->nAllocLen = sizeof(OMX_U32) + sizeof(native_handle_t*); } (*bufferHdr)->pBuffer = (OMX_U8 *)m_pInput_pmem[i].buffer; DEBUG_PRINT_LOW("Virtual address in allocate buffer is %p", m_pInput_pmem[i].buffer); BITMASK_SET(&m_inp_bm_count,i); if (!mUseProxyColorFormat && (dev_use_buf(&m_pInput_pmem[i],PORT_INDEX_IN,i) != true)) { DEBUG_PRINT_ERROR("ERROR: dev_use_buf FAILED for i/p buf"); return OMX_ErrorInsufficientResources; } } else { DEBUG_PRINT_ERROR("ERROR: All i/p buffers are allocated, invalid allocate buf call" "for index [%d]", i); eRet = OMX_ErrorInsufficientResources; } return eRet; }
173,501
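The fix in the OMX record above stops parking the SECURE_BUFPTR sentinel in the buffer pointer for secure sessions: the pointer stays NULL until a real mmap or malloc succeeds, and both results are checked before use. A hedged sketch of that allocation discipline; struct in_buf and attach_buffer are illustrative, not the OMX component's types:

#include <stdlib.h>
#include <sys/mman.h>

struct in_buf {
    void  *vaddr;               /* NULL until mapped or allocated */
    size_t size;
    int    fd;
};

int attach_buffer(struct in_buf *b, int secure_session)
{
    b->vaddr = NULL;
    if (!secure_session) {
        b->vaddr = mmap(NULL, b->size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, b->fd, 0);
        if (b->vaddr == MAP_FAILED) {
            b->vaddr = NULL;    /* never leave MAP_FAILED behind */
            return -1;
        }
    } else {
        b->vaddr = malloc(sizeof(unsigned long));   /* metadata only, no data mapping */
        if (!b->vaddr)
            return -1;          /* check the allocation before any use */
    }
    return 0;
}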
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: __u32 secure_ipv6_id(const __be32 daddr[4]) { const struct keydata *keyptr; __u32 hash[4]; keyptr = get_keyptr(); hash[0] = (__force __u32)daddr[0]; hash[1] = (__force __u32)daddr[1]; hash[2] = (__force __u32)daddr[2]; hash[3] = (__force __u32)daddr[3]; return half_md4_transform(hash, keyptr->secret); } Commit Message: net: Compute protocol sequence numbers and fragment IDs using MD5. Computers have become a lot faster since we compromised on the partial MD4 hash which we use currently for performance reasons. MD5 is a much safer choice, and is inline with both RFC1948 and other ISS generators (OpenBSD, Solaris, etc.) Furthermore, only having 24-bits of the sequence number be truly unpredictable is a very serious limitation. So the periodic regeneration and 8-bit counter have been removed. We compute and use a full 32-bit sequence number. For ipv6, DCCP was found to use a 32-bit truncated initial sequence number (it needs 43-bits) and that is fixed here as well. Reported-by: Dan Kaminsky <[email protected]> Tested-by: Willy Tarreau <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID:
__u32 secure_ipv6_id(const __be32 daddr[4])
165,766
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: my_object_many_args (MyObject *obj, guint32 x, const char *str, double trouble, double *d_ret, char **str_ret, GError **error) { *d_ret = trouble + (x * 2); *str_ret = g_ascii_strup (str, -1); return TRUE; } Commit Message: CWE ID: CWE-264
my_object_many_args (MyObject *obj, guint32 x, const char *str, double trouble, double *d_ret, char **str_ret, GError **error)
165,110
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: int __glXDispSwap_CreateContext(__GLXclientState *cl, GLbyte *pc) { xGLXCreateContextReq *req = (xGLXCreateContextReq *) pc; __GLX_DECLARE_SWAP_VARIABLES; __GLX_SWAP_SHORT(&req->length); __GLX_SWAP_INT(&req->context); __GLX_SWAP_INT(&req->visual); return __glXDisp_CreateContext(cl, pc); } Commit Message: CWE ID: CWE-20
int __glXDispSwap_CreateContext(__GLXclientState *cl, GLbyte *pc) { ClientPtr client = cl->client; xGLXCreateContextReq *req = (xGLXCreateContextReq *) pc; __GLX_DECLARE_SWAP_VARIABLES; REQUEST_SIZE_MATCH(xGLXCreateContextReq); __GLX_SWAP_SHORT(&req->length); __GLX_SWAP_INT(&req->context); __GLX_SWAP_INT(&req->visual); return __glXDisp_CreateContext(cl, pc); }
165,269
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: SProcXFixesSelectCursorInput(ClientPtr client) { REQUEST(xXFixesSelectCursorInputReq); swaps(&stuff->length); swapl(&stuff->window); return (*ProcXFixesVector[stuff->xfixesReqType]) (client); } Commit Message: CWE ID: CWE-20
SProcXFixesSelectCursorInput(ClientPtr client) { REQUEST(xXFixesSelectCursorInputReq); REQUEST_SIZE_MATCH(xXFixesSelectCursorInputReq); swaps(&stuff->length); swapl(&stuff->window); return (*ProcXFixesVector[stuff->xfixesReqType]) (client); }
165,441
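The two X.Org records above apply the same CWE-20 fix: validate the client-supplied request size (REQUEST_SIZE_MATCH) before any byte-swapped field is read. A minimal standalone sketch of that validate-before-parse pattern follows; the struct and function names are hypothetical, not the Xorg macros.

/* Editorial sketch of the validate-before-swap pattern used in the two
 * X.Org fixes above; types and names are illustrative only. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct wire_req {
    uint16_t length;    /* size declared by the client */
    uint32_t window;
};

/* Returns 0 on success, -1 if the received byte count does not match the
 * fixed request size - checked BEFORE any field is interpreted. */
static int handle_swapped_request(const uint8_t *buf, size_t received)
{
    struct wire_req req;

    if (received != sizeof(struct wire_req))
        return -1;

    memcpy(&req, buf, sizeof(req));
    req.length = (uint16_t)((req.length >> 8) | (req.length << 8));
    req.window = __builtin_bswap32(req.window);   /* GCC/Clang builtin */

    printf("window = 0x%08x\n", req.window);
    return 0;
}

int main(void)
{
    uint8_t short_packet[2] = { 0, 1 };
    uint8_t good_packet[sizeof(struct wire_req)] = { 0 };

    /* An undersized request is rejected instead of being read past its end. */
    if (handle_swapped_request(short_packet, sizeof(short_packet)) != -1)
        return 1;
    return handle_swapped_request(good_packet, sizeof(good_packet));
}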
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: inline HTMLIFrameElement::HTMLIFrameElement(Document& document) : HTMLFrameElementBase(iframeTag, document), did_load_non_empty_document_(false), collapsed_by_client_(false), sandbox_(HTMLIFrameElementSandbox::Create(this)), referrer_policy_(kReferrerPolicyDefault) {} Commit Message: Resource Timing: Do not report subsequent navigations within subframes We only want to record resource timing for the load that was initiated by parent document. We filter out subsequent navigations for <iframe>, but we should do it for other types of subframes too. Bug: 780312 Change-Id: I3a7b9e1a365c99e24bb8dac190e88c7099fc3da5 Reviewed-on: https://chromium-review.googlesource.com/750487 Reviewed-by: Nate Chapin <[email protected]> Commit-Queue: Kunihiko Sakamoto <[email protected]> Cr-Commit-Position: refs/heads/master@{#513665} CWE ID: CWE-601
inline HTMLIFrameElement::HTMLIFrameElement(Document& document) : HTMLFrameElementBase(iframeTag, document), collapsed_by_client_(false), sandbox_(HTMLIFrameElementSandbox::Create(this)), referrer_policy_(kReferrerPolicyDefault) {}
172,929
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); /* Initializing verdict to NF_DROP keeps gcc happy. */ unsigned int verdict = NF_DROP; const char *indev, *outdev; const void *table_base; struct ip6t_entry *e, **jumpstack; unsigned int stackidx, cpu; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; /* Initialization */ stackidx = 0; indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; /* We handle fragments by dealing with the first fragment as * if it was a normal packet. All other fragments are treated * normally, except that they will NEVER match rules that ask * things we don't know, ie. tcp syn flag or ports). If the * rule is also a fragment-specific rule, non-fragments won't * match it. */ acpar.hotdrop = false; acpar.state = state; WARN_ON(!(table->valid_hooks & (1 << hook))); local_bh_disable(); addend = xt_write_recseq_begin(); private = READ_ONCE(table->private); /* Address dependency. */ cpu = smp_processor_id(); table_base = private->entries; jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; /* Switch to alternate jumpstack if we're being invoked via TEE. * TEE issues XT_CONTINUE verdict on original skb so we must not * clobber the jumpstack. * * For recursion via REJECT or SYNPROXY the stack will be clobbered * but it is no problem since absolute verdict is issued by these. */ if (static_key_false(&xt_tee_enabled)) jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); e = get_entry(table_base, private->hook_entry[hook]); do { const struct xt_entry_target *t; const struct xt_entry_match *ematch; struct xt_counters *counter; WARN_ON(!e); acpar.thoff = 0; if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { no_match: e = ip6t_next_entry(e); continue; } xt_ematch_foreach(ematch, e) { acpar.match = ematch->u.kernel.match; acpar.matchinfo = ematch->data; if (!acpar.match->match(skb, &acpar)) goto no_match; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, skb->len, 1); t = ip6t_get_target_c(e); WARN_ON(!t->u.kernel.target); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* The packet is traced: log it */ if (unlikely(skb->nf_trace)) trace_packet(state->net, skb, hook, state->in, state->out, table->name, private, e); #endif /* Standard target? */ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) e = get_entry(table_base, private->underflow[hook]); else e = ip6t_next_entry(jumpstack[--stackidx]); continue; } if (table_base + v != ip6t_next_entry(e) && !(e->ipv6.flags & IP6T_F_GOTO)) { jumpstack[stackidx++] = e; } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); if (verdict == XT_CONTINUE) e = ip6t_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); if (acpar.hotdrop) return NF_DROP; else return verdict; } Commit Message: netfilter: add back stackpointer size checks The rationale for removing the check is only correct for rulesets generated by ip(6)tables. 
In iptables, a jump can only occur to a user-defined chain, i.e. because we size the stack based on number of user-defined chains we cannot exceed stack size. However, the underlying binary format has no such restriction, and the validation step only ensures that the jump target is a valid rule start point. IOW, its possible to build a rule blob that has no user-defined chains but does contain a jump. If this happens, no jump stack gets allocated and crash occurs because no jumpstack was allocated. Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset") Reported-by: [email protected] Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]> CWE ID: CWE-476
ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); /* Initializing verdict to NF_DROP keeps gcc happy. */ unsigned int verdict = NF_DROP; const char *indev, *outdev; const void *table_base; struct ip6t_entry *e, **jumpstack; unsigned int stackidx, cpu; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; /* Initialization */ stackidx = 0; indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; /* We handle fragments by dealing with the first fragment as * if it was a normal packet. All other fragments are treated * normally, except that they will NEVER match rules that ask * things we don't know, ie. tcp syn flag or ports). If the * rule is also a fragment-specific rule, non-fragments won't * match it. */ acpar.hotdrop = false; acpar.state = state; WARN_ON(!(table->valid_hooks & (1 << hook))); local_bh_disable(); addend = xt_write_recseq_begin(); private = READ_ONCE(table->private); /* Address dependency. */ cpu = smp_processor_id(); table_base = private->entries; jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; /* Switch to alternate jumpstack if we're being invoked via TEE. * TEE issues XT_CONTINUE verdict on original skb so we must not * clobber the jumpstack. * * For recursion via REJECT or SYNPROXY the stack will be clobbered * but it is no problem since absolute verdict is issued by these. */ if (static_key_false(&xt_tee_enabled)) jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); e = get_entry(table_base, private->hook_entry[hook]); do { const struct xt_entry_target *t; const struct xt_entry_match *ematch; struct xt_counters *counter; WARN_ON(!e); acpar.thoff = 0; if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { no_match: e = ip6t_next_entry(e); continue; } xt_ematch_foreach(ematch, e) { acpar.match = ematch->u.kernel.match; acpar.matchinfo = ematch->data; if (!acpar.match->match(skb, &acpar)) goto no_match; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, skb->len, 1); t = ip6t_get_target_c(e); WARN_ON(!t->u.kernel.target); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* The packet is traced: log it */ if (unlikely(skb->nf_trace)) trace_packet(state->net, skb, hook, state->in, state->out, table->name, private, e); #endif /* Standard target? */ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) e = get_entry(table_base, private->underflow[hook]); else e = ip6t_next_entry(jumpstack[--stackidx]); continue; } if (table_base + v != ip6t_next_entry(e) && !(e->ipv6.flags & IP6T_F_GOTO)) { if (unlikely(stackidx >= private->stacksize)) { verdict = NF_DROP; break; } jumpstack[stackidx++] = e; } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); if (verdict == XT_CONTINUE) e = ip6t_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); if (acpar.hotdrop) return NF_DROP; else return verdict; }
169,364
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void cliRefreshPrompt(void) { int len; if (config.eval_ldb) return; if (config.hostsocket != NULL) len = snprintf(config.prompt,sizeof(config.prompt),"redis %s", config.hostsocket); else len = anetFormatAddr(config.prompt, sizeof(config.prompt), config.hostip, config.hostport); /* Add [dbnum] if needed */ if (config.dbnum != 0) len += snprintf(config.prompt+len,sizeof(config.prompt)-len,"[%d]", config.dbnum); snprintf(config.prompt+len,sizeof(config.prompt)-len,"> "); } Commit Message: Security: fix redis-cli buffer overflow. Thanks to Fakhri Zulkifli for reporting it. The fix switched to dynamic allocation, copying the final prompt in the static buffer only at the end. CWE ID: CWE-119
static void cliRefreshPrompt(void) { if (config.eval_ldb) return; sds prompt = sdsempty(); if (config.hostsocket != NULL) { prompt = sdscatfmt(prompt,"redis %s",config.hostsocket); } else { char addr[256]; anetFormatAddr(addr, sizeof(addr), config.hostip, config.hostport); prompt = sdscatlen(prompt,addr,strlen(addr)); } /* Add [dbnum] if needed */ if (config.dbnum != 0) prompt = sdscatfmt(prompt,"[%i]",config.dbnum); /* Copy the prompt in the static buffer. */ prompt = sdscatlen(prompt,"> ",2); snprintf(config.prompt,sizeof(config.prompt),"%s",prompt); sdsfree(prompt); }
169,196
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int http_receive_data(HTTPContext *c) { HTTPContext *c1; int len, loop_run = 0; while (c->chunked_encoding && !c->chunk_size && c->buffer_end > c->buffer_ptr) { /* read chunk header, if present */ len = recv(c->fd, c->buffer_ptr, 1, 0); if (len < 0) { if (ff_neterrno() != AVERROR(EAGAIN) && ff_neterrno() != AVERROR(EINTR)) /* error : close connection */ goto fail; return 0; } else if (len == 0) { /* end of connection : close it */ goto fail; } else if (c->buffer_ptr - c->buffer >= 2 && !memcmp(c->buffer_ptr - 1, "\r\n", 2)) { c->chunk_size = strtol(c->buffer, 0, 16); if (c->chunk_size == 0) // end of stream goto fail; c->buffer_ptr = c->buffer; break; } else if (++loop_run > 10) /* no chunk header, abort */ goto fail; else c->buffer_ptr++; } if (c->buffer_end > c->buffer_ptr) { len = recv(c->fd, c->buffer_ptr, FFMIN(c->chunk_size, c->buffer_end - c->buffer_ptr), 0); if (len < 0) { if (ff_neterrno() != AVERROR(EAGAIN) && ff_neterrno() != AVERROR(EINTR)) /* error : close connection */ goto fail; } else if (len == 0) /* end of connection : close it */ goto fail; else { c->chunk_size -= len; c->buffer_ptr += len; c->data_count += len; update_datarate(&c->datarate, c->data_count); } } if (c->buffer_ptr - c->buffer >= 2 && c->data_count > FFM_PACKET_SIZE) { if (c->buffer[0] != 'f' || c->buffer[1] != 'm') { http_log("Feed stream has become desynchronized -- disconnecting\n"); goto fail; } } if (c->buffer_ptr >= c->buffer_end) { FFServerStream *feed = c->stream; /* a packet has been received : write it in the store, except * if header */ if (c->data_count > FFM_PACKET_SIZE) { /* XXX: use llseek or url_seek * XXX: Should probably fail? */ if (lseek(c->feed_fd, feed->feed_write_index, SEEK_SET) == -1) http_log("Seek to %"PRId64" failed\n", feed->feed_write_index); if (write(c->feed_fd, c->buffer, FFM_PACKET_SIZE) < 0) { http_log("Error writing to feed file: %s\n", strerror(errno)); goto fail; } feed->feed_write_index += FFM_PACKET_SIZE; /* update file size */ if (feed->feed_write_index > c->stream->feed_size) feed->feed_size = feed->feed_write_index; /* handle wrap around if max file size reached */ if (c->stream->feed_max_size && feed->feed_write_index >= c->stream->feed_max_size) feed->feed_write_index = FFM_PACKET_SIZE; /* write index */ if (ffm_write_write_index(c->feed_fd, feed->feed_write_index) < 0) { http_log("Error writing index to feed file: %s\n", strerror(errno)); goto fail; } /* wake up any waiting connections */ for(c1 = first_http_ctx; c1; c1 = c1->next) { if (c1->state == HTTPSTATE_WAIT_FEED && c1->stream->feed == c->stream->feed) c1->state = HTTPSTATE_SEND_DATA; } } else { /* We have a header in our hands that contains useful data */ AVFormatContext *s = avformat_alloc_context(); AVIOContext *pb; AVInputFormat *fmt_in; int i; if (!s) goto fail; /* use feed output format name to find corresponding input format */ fmt_in = av_find_input_format(feed->fmt->name); if (!fmt_in) goto fail; pb = avio_alloc_context(c->buffer, c->buffer_end - c->buffer, 0, NULL, NULL, NULL, NULL); if (!pb) goto fail; pb->seekable = 0; s->pb = pb; if (avformat_open_input(&s, c->stream->feed_filename, fmt_in, NULL) < 0) { av_freep(&pb); goto fail; } /* Now we have the actual streams */ if (s->nb_streams != feed->nb_streams) { avformat_close_input(&s); av_freep(&pb); http_log("Feed '%s' stream number does not match registered feed\n", c->stream->feed_filename); goto fail; } for (i = 0; i < s->nb_streams; i++) { LayeredAVStream *fst = feed->streams[i]; AVStream *st = s->streams[i]; 
avcodec_parameters_to_context(fst->codec, st->codecpar); avcodec_parameters_from_context(fst->codecpar, fst->codec); } avformat_close_input(&s); av_freep(&pb); } c->buffer_ptr = c->buffer; } return 0; fail: c->stream->feed_opened = 0; close(c->feed_fd); /* wake up any waiting connections to stop waiting for feed */ for(c1 = first_http_ctx; c1; c1 = c1->next) { if (c1->state == HTTPSTATE_WAIT_FEED && c1->stream->feed == c->stream->feed) c1->state = HTTPSTATE_SEND_DATA_TRAILER; } return -1; } Commit Message: ffserver: Check chunk size Fixes out of array access Fixes: poc_ffserver.py Found-by: Paul Cher <[email protected]> Signed-off-by: Michael Niedermayer <[email protected]> CWE ID: CWE-119
static int http_receive_data(HTTPContext *c) { HTTPContext *c1; int len, loop_run = 0; while (c->chunked_encoding && !c->chunk_size && c->buffer_end > c->buffer_ptr) { /* read chunk header, if present */ len = recv(c->fd, c->buffer_ptr, 1, 0); if (len < 0) { if (ff_neterrno() != AVERROR(EAGAIN) && ff_neterrno() != AVERROR(EINTR)) /* error : close connection */ goto fail; return 0; } else if (len == 0) { /* end of connection : close it */ goto fail; } else if (c->buffer_ptr - c->buffer >= 2 && !memcmp(c->buffer_ptr - 1, "\r\n", 2)) { c->chunk_size = strtol(c->buffer, 0, 16); if (c->chunk_size <= 0) { // end of stream or invalid chunk size c->chunk_size = 0; goto fail; } c->buffer_ptr = c->buffer; break; } else if (++loop_run > 10) /* no chunk header, abort */ goto fail; else c->buffer_ptr++; } if (c->buffer_end > c->buffer_ptr) { len = recv(c->fd, c->buffer_ptr, FFMIN(c->chunk_size, c->buffer_end - c->buffer_ptr), 0); if (len < 0) { if (ff_neterrno() != AVERROR(EAGAIN) && ff_neterrno() != AVERROR(EINTR)) /* error : close connection */ goto fail; } else if (len == 0) /* end of connection : close it */ goto fail; else { av_assert0(len <= c->chunk_size); c->chunk_size -= len; c->buffer_ptr += len; c->data_count += len; update_datarate(&c->datarate, c->data_count); } } if (c->buffer_ptr - c->buffer >= 2 && c->data_count > FFM_PACKET_SIZE) { if (c->buffer[0] != 'f' || c->buffer[1] != 'm') { http_log("Feed stream has become desynchronized -- disconnecting\n"); goto fail; } } if (c->buffer_ptr >= c->buffer_end) { FFServerStream *feed = c->stream; /* a packet has been received : write it in the store, except * if header */ if (c->data_count > FFM_PACKET_SIZE) { /* XXX: use llseek or url_seek * XXX: Should probably fail? */ if (lseek(c->feed_fd, feed->feed_write_index, SEEK_SET) == -1) http_log("Seek to %"PRId64" failed\n", feed->feed_write_index); if (write(c->feed_fd, c->buffer, FFM_PACKET_SIZE) < 0) { http_log("Error writing to feed file: %s\n", strerror(errno)); goto fail; } feed->feed_write_index += FFM_PACKET_SIZE; /* update file size */ if (feed->feed_write_index > c->stream->feed_size) feed->feed_size = feed->feed_write_index; /* handle wrap around if max file size reached */ if (c->stream->feed_max_size && feed->feed_write_index >= c->stream->feed_max_size) feed->feed_write_index = FFM_PACKET_SIZE; /* write index */ if (ffm_write_write_index(c->feed_fd, feed->feed_write_index) < 0) { http_log("Error writing index to feed file: %s\n", strerror(errno)); goto fail; } /* wake up any waiting connections */ for(c1 = first_http_ctx; c1; c1 = c1->next) { if (c1->state == HTTPSTATE_WAIT_FEED && c1->stream->feed == c->stream->feed) c1->state = HTTPSTATE_SEND_DATA; } } else { /* We have a header in our hands that contains useful data */ AVFormatContext *s = avformat_alloc_context(); AVIOContext *pb; AVInputFormat *fmt_in; int i; if (!s) goto fail; /* use feed output format name to find corresponding input format */ fmt_in = av_find_input_format(feed->fmt->name); if (!fmt_in) goto fail; pb = avio_alloc_context(c->buffer, c->buffer_end - c->buffer, 0, NULL, NULL, NULL, NULL); if (!pb) goto fail; pb->seekable = 0; s->pb = pb; if (avformat_open_input(&s, c->stream->feed_filename, fmt_in, NULL) < 0) { av_freep(&pb); goto fail; } /* Now we have the actual streams */ if (s->nb_streams != feed->nb_streams) { avformat_close_input(&s); av_freep(&pb); http_log("Feed '%s' stream number does not match registered feed\n", c->stream->feed_filename); goto fail; } for (i = 0; i < s->nb_streams; i++) { LayeredAVStream 
*fst = feed->streams[i]; AVStream *st = s->streams[i]; avcodec_parameters_to_context(fst->codec, st->codecpar); avcodec_parameters_from_context(fst->codecpar, fst->codec); } avformat_close_input(&s); av_freep(&pb); } c->buffer_ptr = c->buffer; } return 0; fail: c->stream->feed_opened = 0; close(c->feed_fd); /* wake up any waiting connections to stop waiting for feed */ for(c1 = first_http_ctx; c1; c1 = c1->next) { if (c1->state == HTTPSTATE_WAIT_FEED && c1->stream->feed == c->stream->feed) c1->state = HTTPSTATE_SEND_DATA_TRAILER; } return -1; }
168,494
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: ripng_print(netdissect_options *ndo, const u_char *dat, unsigned int length) { register const struct rip6 *rp = (const struct rip6 *)dat; register const struct netinfo6 *ni; register u_int amt; register u_int i; int j; int trunc; if (ndo->ndo_snapend < dat) return; amt = ndo->ndo_snapend - dat; i = min(length, amt); if (i < (sizeof(struct rip6) - sizeof(struct netinfo6))) return; i -= (sizeof(struct rip6) - sizeof(struct netinfo6)); switch (rp->rip6_cmd) { case RIP6_REQUEST: j = length / sizeof(*ni); if (j == 1 && rp->rip6_nets->rip6_metric == HOPCNT_INFINITY6 && IN6_IS_ADDR_UNSPECIFIED(&rp->rip6_nets->rip6_dest)) { ND_PRINT((ndo, " ripng-req dump")); break; } if (j * sizeof(*ni) != length - 4) ND_PRINT((ndo, " ripng-req %d[%u]:", j, length)); else ND_PRINT((ndo, " ripng-req %d:", j)); trunc = ((i / sizeof(*ni)) * sizeof(*ni) != i); for (ni = rp->rip6_nets; i >= sizeof(*ni); i -= sizeof(*ni), ++ni) { if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n\t")); else ND_PRINT((ndo, " ")); rip6_entry_print(ndo, ni, 0); } break; case RIP6_RESPONSE: j = length / sizeof(*ni); if (j * sizeof(*ni) != length - 4) ND_PRINT((ndo, " ripng-resp %d[%u]:", j, length)); else ND_PRINT((ndo, " ripng-resp %d:", j)); trunc = ((i / sizeof(*ni)) * sizeof(*ni) != i); for (ni = rp->rip6_nets; i >= sizeof(*ni); i -= sizeof(*ni), ++ni) { if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n\t")); else ND_PRINT((ndo, " ")); rip6_entry_print(ndo, ni, ni->rip6_metric); } if (trunc) ND_PRINT((ndo, "[|ripng]")); break; default: ND_PRINT((ndo, " ripng-%d ?? %u", rp->rip6_cmd, length)); break; } if (rp->rip6_vers != RIP6_VERSION) ND_PRINT((ndo, " [vers %d]", rp->rip6_vers)); } Commit Message: CVE-2017-12992/RIPng: Clean up bounds checking. Do bounds checking as we access items. Scan the list of netinfo6 entries based on the supplied packet length, without taking the captured length into account; let the aforementioned bounds checking handle that. This fixes a buffer over-read discovered by Kamil Frankowicz. Add a test using the capture file supplied by the reporter(s). CWE ID: CWE-125
ripng_print(netdissect_options *ndo, const u_char *dat, unsigned int length) { register const struct rip6 *rp = (const struct rip6 *)dat; register const struct netinfo6 *ni; unsigned int length_left; u_int j; ND_TCHECK(rp->rip6_cmd); switch (rp->rip6_cmd) { case RIP6_REQUEST: length_left = length; if (length_left < (sizeof(struct rip6) - sizeof(struct netinfo6))) goto trunc; length_left -= (sizeof(struct rip6) - sizeof(struct netinfo6)); j = length_left / sizeof(*ni); if (j == 1) { ND_TCHECK(rp->rip6_nets); if (rp->rip6_nets->rip6_metric == HOPCNT_INFINITY6 && IN6_IS_ADDR_UNSPECIFIED(&rp->rip6_nets->rip6_dest)) { ND_PRINT((ndo, " ripng-req dump")); break; } } if (j * sizeof(*ni) != length_left) ND_PRINT((ndo, " ripng-req %u[%u]:", j, length)); else ND_PRINT((ndo, " ripng-req %u:", j)); for (ni = rp->rip6_nets; length_left >= sizeof(*ni); length_left -= sizeof(*ni), ++ni) { ND_TCHECK(*ni); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n\t")); else ND_PRINT((ndo, " ")); rip6_entry_print(ndo, ni, 0); } if (length_left != 0) goto trunc; break; case RIP6_RESPONSE: length_left = length; if (length_left < (sizeof(struct rip6) - sizeof(struct netinfo6))) goto trunc; length_left -= (sizeof(struct rip6) - sizeof(struct netinfo6)); j = length_left / sizeof(*ni); if (j * sizeof(*ni) != length_left) ND_PRINT((ndo, " ripng-resp %d[%u]:", j, length)); else ND_PRINT((ndo, " ripng-resp %d:", j)); for (ni = rp->rip6_nets; length_left >= sizeof(*ni); length_left -= sizeof(*ni), ++ni) { ND_TCHECK(*ni); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n\t")); else ND_PRINT((ndo, " ")); rip6_entry_print(ndo, ni, ni->rip6_metric); } if (length_left != 0) goto trunc; break; default: ND_PRINT((ndo, " ripng-%d ?? %u", rp->rip6_cmd, length)); break; } ND_TCHECK(rp->rip6_vers); if (rp->rip6_vers != RIP6_VERSION) ND_PRINT((ndo, " [vers %d]", rp->rip6_vers)); return; trunc: ND_PRINT((ndo, "[|ripng]")); return; }
167,922
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: long long SegmentInfo::GetDuration() const { if (m_duration < 0) return -1; assert(m_timecodeScale >= 1); const double dd = double(m_duration) * double(m_timecodeScale); const long long d = static_cast<long long>(dd); return d; } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
long long SegmentInfo::GetDuration() const
174,307
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool UnpackOriginPermissions(const std::vector<std::string>& origins_input, const PermissionSet& required_permissions, const PermissionSet& optional_permissions, bool allow_file_access, UnpackPermissionSetResult* result, std::string* error) { int user_script_schemes = UserScript::ValidUserScriptSchemes(); int explicit_schemes = Extension::kValidHostPermissionSchemes; if (!allow_file_access) { user_script_schemes &= ~URLPattern::SCHEME_FILE; explicit_schemes &= ~URLPattern::SCHEME_FILE; } for (const auto& origin_str : origins_input) { URLPattern explicit_origin(explicit_schemes); URLPattern::ParseResult parse_result = explicit_origin.Parse(origin_str); if (URLPattern::ParseResult::kSuccess != parse_result) { *error = ErrorUtils::FormatErrorMessage( kInvalidOrigin, origin_str, URLPattern::GetParseResultString(parse_result)); return false; } bool used_origin = false; if (required_permissions.explicit_hosts().ContainsPattern( explicit_origin)) { used_origin = true; result->required_explicit_hosts.AddPattern(explicit_origin); } else if (optional_permissions.explicit_hosts().ContainsPattern( explicit_origin)) { used_origin = true; result->optional_explicit_hosts.AddPattern(explicit_origin); } URLPattern scriptable_origin(user_script_schemes); if (scriptable_origin.Parse(origin_str) == URLPattern::ParseResult::kSuccess && required_permissions.scriptable_hosts().ContainsPattern( scriptable_origin)) { used_origin = true; result->required_scriptable_hosts.AddPattern(scriptable_origin); } if (!used_origin) result->unlisted_hosts.AddPattern(explicit_origin); } return true; } Commit Message: [Extensions] Have URLPattern::Contains() properly check schemes Have URLPattern::Contains() properly check the schemes of the patterns when evaluating if one pattern contains another. This is important in order to prevent extensions from requesting chrome:-scheme permissions via the permissions API when <all_urls> is specified as an optional permission. Bug: 859600,918470 Change-Id: If04d945ad0c939e84a80d83502c0f84b6ef0923d Reviewed-on: https://chromium-review.googlesource.com/c/1396561 Commit-Queue: Devlin <[email protected]> Reviewed-by: Karan Bhatia <[email protected]> Cr-Commit-Position: refs/heads/master@{#621410} CWE ID: CWE-79
bool UnpackOriginPermissions(const std::vector<std::string>& origins_input, const PermissionSet& required_permissions, const PermissionSet& optional_permissions, bool allow_file_access, UnpackPermissionSetResult* result, std::string* error) { int user_script_schemes = UserScript::ValidUserScriptSchemes(); int explicit_schemes = Extension::kValidHostPermissionSchemes; if (!allow_file_access) { user_script_schemes &= ~URLPattern::SCHEME_FILE; explicit_schemes &= ~URLPattern::SCHEME_FILE; } auto filter_chrome_scheme = [](URLPattern* pattern) { // We disallow the chrome:-scheme unless the pattern is explicitly // "chrome://..." - that is, <all_urls> should not match the chrome:-scheme. // Patterns which explicitly specify the chrome:-scheme are safe, since // manifest parsing won't allow them unless the kExtensionsOnChromeURLs // switch is enabled. // Note that we don't check PermissionsData::AllUrlsIncludesChromeUrls() // here, since that's only needed for Chromevox (which doesn't use optional // permissions). if (pattern->scheme() != content::kChromeUIScheme) { // NOTE: We use pattern->valid_schemes() here (instead of // |user_script_schemes| or |explicit_schemes|) because // URLPattern::Parse() can mutate the valid schemes for a pattern, and we // don't want to override those changes. pattern->SetValidSchemes(pattern->valid_schemes() & ~URLPattern::SCHEME_CHROMEUI); } }; for (const auto& origin_str : origins_input) { URLPattern explicit_origin(explicit_schemes); URLPattern::ParseResult parse_result = explicit_origin.Parse(origin_str); if (URLPattern::ParseResult::kSuccess != parse_result) { *error = ErrorUtils::FormatErrorMessage( kInvalidOrigin, origin_str, URLPattern::GetParseResultString(parse_result)); return false; } filter_chrome_scheme(&explicit_origin); bool used_origin = false; if (required_permissions.explicit_hosts().ContainsPattern( explicit_origin)) { used_origin = true; result->required_explicit_hosts.AddPattern(explicit_origin); } else if (optional_permissions.explicit_hosts().ContainsPattern( explicit_origin)) { used_origin = true; result->optional_explicit_hosts.AddPattern(explicit_origin); } URLPattern scriptable_origin(user_script_schemes); if (scriptable_origin.Parse(origin_str) == URLPattern::ParseResult::kSuccess) { filter_chrome_scheme(&scriptable_origin); if (required_permissions.scriptable_hosts().ContainsPattern( scriptable_origin)) { used_origin = true; result->required_scriptable_hosts.AddPattern(scriptable_origin); } } if (!used_origin) result->unlisted_hosts.AddPattern(explicit_origin); } return true; }
173,118
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt) { int n; assert(cnt >= 0); assert(buf); JAS_DBGLOG(100, ("mem_read(%p, %p, %d)\n", obj, buf, cnt)); jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj; n = m->len_ - m->pos_; cnt = JAS_MIN(n, cnt); memcpy(buf, &m->buf_[m->pos_], cnt); m->pos_ += cnt; return cnt; } Commit Message: Made some changes to the I/O stream library for memory streams. There were a number of potential problems due to the possibility of integer overflow. Changed some integral types to the larger types size_t or ssize_t. For example, the function mem_resize now takes the buffer size parameter as a size_t. Added a new function jas_stream_memopen2, which takes a buffer size specified as a size_t instead of an int. This can be used in jas_image_cmpt_create to avoid potential overflow problems. Added a new function jas_deprecated to warn about reliance on deprecated library behavior. CWE ID: CWE-190
static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt) { ssize_t n; assert(cnt >= 0); assert(buf); JAS_DBGLOG(100, ("mem_read(%p, %p, %d)\n", obj, buf, cnt)); jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj; n = m->len_ - m->pos_; cnt = JAS_MIN(n, cnt); memcpy(buf, &m->buf_[m->pos_], cnt); m->pos_ += cnt; return cnt; }
168,749
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: ResourceHostMsg_Request CreateXHRRequestWithOrigin(const char* origin) { ResourceHostMsg_Request request; request.method = "GET"; request.url = GURL("http://bar.com/simple_page.html"); request.first_party_for_cookies = GURL(origin); request.referrer_policy = blink::WebReferrerPolicyDefault; request.headers = base::StringPrintf("Origin: %s\r\n", origin); request.load_flags = 0; request.origin_pid = 0; request.resource_type = RESOURCE_TYPE_XHR; request.request_context = 0; request.appcache_host_id = kAppCacheNoHostId; request.download_to_file = false; request.should_reset_appcache = false; request.is_main_frame = true; request.parent_is_main_frame = false; request.parent_render_frame_id = -1; request.transition_type = ui::PAGE_TRANSITION_LINK; request.allow_download = true; return request; } Commit Message: Block a compromised renderer from reusing request ids. BUG=578882 Review URL: https://codereview.chromium.org/1608573002 Cr-Commit-Position: refs/heads/master@{#372547} CWE ID: CWE-362
ResourceHostMsg_Request CreateXHRRequestWithOrigin(const char* origin) { ResourceHostMsg_Request CreateXHRRequest(const char* url) { ResourceHostMsg_Request request; request.method = "GET"; request.url = GURL(url); request.referrer_policy = blink::WebReferrerPolicyDefault; request.load_flags = 0; request.origin_pid = 0; request.resource_type = RESOURCE_TYPE_XHR; request.request_context = 0; request.appcache_host_id = kAppCacheNoHostId; request.download_to_file = false; request.should_reset_appcache = false; request.is_main_frame = true; request.parent_is_main_frame = false; request.parent_render_frame_id = -1; request.transition_type = ui::PAGE_TRANSITION_LINK; request.allow_download = true; return request; }
172,272
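The corrected output in the record above does not show the check itself, and the commit message only states the goal: a compromised renderer must not reuse a request id. Purely as an illustration of that idea, and not Chromium's implementation, here is a sketch that tracks in-flight ids per child process and rejects duplicates; the types, limits, and names are all invented for the example.

/* Editorial sketch (not Chromium code): refuse a request id that the
 * same child already has in flight. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_IN_FLIGHT 64

struct child_state {
    int in_flight[MAX_IN_FLIGHT];
    int count;
};

/* Returns false (caller drops the request) if request_id is already in use. */
static bool register_request(struct child_state *child, int request_id)
{
    for (int i = 0; i < child->count; i++)
        if (child->in_flight[i] == request_id)
            return false;               /* duplicate id from this child */
    if (child->count == MAX_IN_FLIGHT)
        return false;                   /* table full; refuse rather than overwrite */
    child->in_flight[child->count++] = request_id;
    return true;
}

int main(void)
{
    struct child_state child = { .count = 0 };

    printf("first use of id 7:  %s\n", register_request(&child, 7) ? "accepted" : "rejected");
    printf("second use of id 7: %s\n", register_request(&child, 7) ? "accepted" : "rejected");
    return 0;
}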
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool asn1_write_LDAPString(struct asn1_data *data, const char *s) { asn1_write(data, s, strlen(s)); return !data->has_error; } Commit Message: CWE ID: CWE-399
bool asn1_write_LDAPString(struct asn1_data *data, const char *s) { return asn1_write(data, s, strlen(s)); }
164,590
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static GIOChannel *irssi_ssl_get_iochannel(GIOChannel *handle, const char *mycert, const char *mypkey, const char *cafile, const char *capath, gboolean verify) { GIOSSLChannel *chan; GIOChannel *gchan; int fd; SSL *ssl; SSL_CTX *ctx = NULL; g_return_val_if_fail(handle != NULL, NULL); if(!ssl_ctx && !irssi_ssl_init()) return NULL; if(!(fd = g_io_channel_unix_get_fd(handle))) return NULL; if (mycert && *mycert) { char *scert = NULL, *spkey = NULL; if ((ctx = SSL_CTX_new(SSLv23_client_method())) == NULL) { g_error("Could not allocate memory for SSL context"); return NULL; } scert = convert_home(mycert); if (mypkey && *mypkey) spkey = convert_home(mypkey); if (! SSL_CTX_use_certificate_file(ctx, scert, SSL_FILETYPE_PEM)) g_warning("Loading of client certificate '%s' failed", mycert); else if (! SSL_CTX_use_PrivateKey_file(ctx, spkey ? spkey : scert, SSL_FILETYPE_PEM)) g_warning("Loading of private key '%s' failed", mypkey ? mypkey : mycert); else if (! SSL_CTX_check_private_key(ctx)) g_warning("Private key does not match the certificate"); g_free(scert); g_free(spkey); } if ((cafile && *cafile) || (capath && *capath)) { char *scafile = NULL; char *scapath = NULL; if (! ctx && (ctx = SSL_CTX_new(SSLv23_client_method())) == NULL) { g_error("Could not allocate memory for SSL context"); return NULL; } if (cafile && *cafile) scafile = convert_home(cafile); if (capath && *capath) scapath = convert_home(capath); if (! SSL_CTX_load_verify_locations(ctx, scafile, scapath)) { g_warning("Could not load CA list for verifying SSL server certificate"); g_free(scafile); g_free(scapath); SSL_CTX_free(ctx); return NULL; } g_free(scafile); g_free(scapath); verify = TRUE; } if (ctx == NULL) ctx = ssl_ctx; if(!(ssl = SSL_new(ctx))) { g_warning("Failed to allocate SSL structure"); return NULL; } if(!SSL_set_fd(ssl, fd)) { g_warning("Failed to associate socket to SSL stream"); SSL_free(ssl); if (ctx != ssl_ctx) SSL_CTX_free(ctx); return NULL; } SSL_set_mode(ssl, SSL_MODE_ENABLE_PARTIAL_WRITE | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); chan = g_new0(GIOSSLChannel, 1); chan->fd = fd; chan->giochan = handle; chan->ssl = ssl; chan->ctx = ctx; chan->verify = verify; gchan = (GIOChannel *)chan; gchan->funcs = &irssi_ssl_channel_funcs; g_io_channel_init(gchan); gchan->is_readable = gchan->is_writeable = TRUE; gchan->use_buffer = FALSE; return gchan; } Commit Message: Check if an SSL certificate matches the hostname of the server we are connecting to git-svn-id: http://svn.irssi.org/repos/irssi/trunk@5104 dbcabf3a-b0e7-0310-adc4-f8d773084564 CWE ID: CWE-20
static GIOChannel *irssi_ssl_get_iochannel(GIOChannel *handle, const char *mycert, const char *mypkey, const char *cafile, const char *capath, gboolean verify) static GIOChannel *irssi_ssl_get_iochannel(GIOChannel *handle, const char *hostname, const char *mycert, const char *mypkey, const char *cafile, const char *capath, gboolean verify) { GIOSSLChannel *chan; GIOChannel *gchan; int fd; SSL *ssl; SSL_CTX *ctx = NULL; g_return_val_if_fail(handle != NULL, NULL); if(!ssl_ctx && !irssi_ssl_init()) return NULL; if(!(fd = g_io_channel_unix_get_fd(handle))) return NULL; if (mycert && *mycert) { char *scert = NULL, *spkey = NULL; if ((ctx = SSL_CTX_new(SSLv23_client_method())) == NULL) { g_error("Could not allocate memory for SSL context"); return NULL; } scert = convert_home(mycert); if (mypkey && *mypkey) spkey = convert_home(mypkey); if (! SSL_CTX_use_certificate_file(ctx, scert, SSL_FILETYPE_PEM)) g_warning("Loading of client certificate '%s' failed", mycert); else if (! SSL_CTX_use_PrivateKey_file(ctx, spkey ? spkey : scert, SSL_FILETYPE_PEM)) g_warning("Loading of private key '%s' failed", mypkey ? mypkey : mycert); else if (! SSL_CTX_check_private_key(ctx)) g_warning("Private key does not match the certificate"); g_free(scert); g_free(spkey); } if ((cafile && *cafile) || (capath && *capath)) { char *scafile = NULL; char *scapath = NULL; if (! ctx && (ctx = SSL_CTX_new(SSLv23_client_method())) == NULL) { g_error("Could not allocate memory for SSL context"); return NULL; } if (cafile && *cafile) scafile = convert_home(cafile); if (capath && *capath) scapath = convert_home(capath); if (! SSL_CTX_load_verify_locations(ctx, scafile, scapath)) { g_warning("Could not load CA list for verifying SSL server certificate"); g_free(scafile); g_free(scapath); SSL_CTX_free(ctx); return NULL; } g_free(scafile); g_free(scapath); verify = TRUE; } if (ctx == NULL) ctx = ssl_ctx; if(!(ssl = SSL_new(ctx))) { g_warning("Failed to allocate SSL structure"); return NULL; } if(!SSL_set_fd(ssl, fd)) { g_warning("Failed to associate socket to SSL stream"); SSL_free(ssl); if (ctx != ssl_ctx) SSL_CTX_free(ctx); return NULL; } SSL_set_mode(ssl, SSL_MODE_ENABLE_PARTIAL_WRITE | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); chan = g_new0(GIOSSLChannel, 1); chan->fd = fd; chan->giochan = handle; chan->ssl = ssl; chan->ctx = ctx; chan->verify = verify; chan->hostname = hostname; gchan = (GIOChannel *)chan; gchan->funcs = &irssi_ssl_channel_funcs; g_io_channel_init(gchan); gchan->is_readable = gchan->is_writeable = TRUE; gchan->use_buffer = FALSE; return gchan; }
165,516
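The fixed function in the record above stores the hostname on the channel, but the certificate comparison the commit message describes lies outside the snippet. As a hedged illustration of the comparison such a check needs (not irssi's code), here is a standalone matcher that accepts a single leading "*." wildcard label; a real implementation must take the names from the verified certificate itself, checking subjectAltName entries before falling back to the CN.

/* Editorial sketch (not irssi's implementation): match a hostname against
 * one name taken from a server certificate. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>    /* strcasecmp */

static bool host_matches_cert_name(const char *hostname, const char *cert_name)
{
    if (strncmp(cert_name, "*.", 2) == 0) {
        /* A wildcard may only cover the left-most label. */
        const char *dot = strchr(hostname, '.');
        if (dot == NULL)
            return false;
        return strcasecmp(dot + 1, cert_name + 2) == 0;
    }
    return strcasecmp(hostname, cert_name) == 0;
}

int main(void)
{
    printf("%d\n", host_matches_cert_name("irc.example.org", "*.example.org"));   /* 1 */
    printf("%d\n", host_matches_cert_name("example.org", "*.example.org"));       /* 0 */
    printf("%d\n", host_matches_cert_name("irc.example.org", "irc.example.org")); /* 1 */
    return 0;
}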
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void SelectionEditor::NodeWillBeRemoved(Node& node_to_be_removed) { if (selection_.IsNone()) return; const Position old_base = selection_.base_; const Position old_extent = selection_.extent_; const Position& new_base = ComputePositionForNodeRemoval(old_base, node_to_be_removed); const Position& new_extent = ComputePositionForNodeRemoval(old_extent, node_to_be_removed); if (new_base == old_base && new_extent == old_extent) return; selection_ = SelectionInDOMTree::Builder() .SetBaseAndExtent(new_base, new_extent) .SetIsHandleVisible(selection_.IsHandleVisible()) .Build(); MarkCacheDirty(); } Commit Message: Move SelectionTemplate::is_handle_visible_ to FrameSelection This patch moves |is_handle_visible_| to |FrameSelection| from |SelectionTemplate| since handle visibility is used only for setting |FrameSelection|, hence it is a redundant member variable of |SelectionTemplate|. Bug: 742093 Change-Id: I3add4da3844fb40be34dcb4d4b46b5fa6fed1d7e Reviewed-on: https://chromium-review.googlesource.com/595389 Commit-Queue: Yoshifumi Inoue <[email protected]> Reviewed-by: Xiaocheng Hu <[email protected]> Reviewed-by: Kent Tamura <[email protected]> Cr-Commit-Position: refs/heads/master@{#491660} CWE ID: CWE-119
void SelectionEditor::NodeWillBeRemoved(Node& node_to_be_removed) { if (selection_.IsNone()) return; const Position old_base = selection_.base_; const Position old_extent = selection_.extent_; const Position& new_base = ComputePositionForNodeRemoval(old_base, node_to_be_removed); const Position& new_extent = ComputePositionForNodeRemoval(old_extent, node_to_be_removed); if (new_base == old_base && new_extent == old_extent) return; selection_ = SelectionInDOMTree::Builder() .SetBaseAndExtent(new_base, new_extent) .Build(); MarkCacheDirty(); }
171,765
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: dissect_usb_video_control_interface_descriptor(proto_tree *parent_tree, tvbuff_t *tvb, guint8 descriptor_len, packet_info *pinfo, usb_conv_info_t *usb_conv_info) { video_conv_info_t *video_conv_info = NULL; video_entity_t *entity = NULL; proto_item *item = NULL; proto_item *subtype_item = NULL; proto_tree *tree = NULL; guint8 entity_id = 0; guint16 terminal_type = 0; int offset = 0; guint8 subtype; subtype = tvb_get_guint8(tvb, offset+2); if (parent_tree) { const gchar *subtype_str; subtype_str = val_to_str_ext(subtype, &vc_if_descriptor_subtypes_ext, "Unknown (0x%x)"); tree = proto_tree_add_subtree_format(parent_tree, tvb, offset, descriptor_len, ett_descriptor_video_control, &item, "VIDEO CONTROL INTERFACE DESCRIPTOR [%s]", subtype_str); } /* Common fields */ dissect_usb_descriptor_header(tree, tvb, offset, &vid_descriptor_type_vals_ext); subtype_item = proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_subtype, tvb, offset+2, 1, ENC_LITTLE_ENDIAN); offset += 3; if (subtype == VC_HEADER) { guint8 num_vs_interfaces; proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_bcdUVC, tvb, offset, 2, ENC_LITTLE_ENDIAN); proto_tree_add_item(tree, hf_usb_vid_ifdesc_wTotalLength, tvb, offset+2, 2, ENC_LITTLE_ENDIAN); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_dwClockFrequency, tvb, offset+4, 4, ENC_LITTLE_ENDIAN); num_vs_interfaces = tvb_get_guint8(tvb, offset+8); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_bInCollection, tvb, offset+8, 1, ENC_LITTLE_ENDIAN); if (num_vs_interfaces > 0) { proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_baInterfaceNr, tvb, offset+9, num_vs_interfaces, ENC_NA); } offset += 9 + num_vs_interfaces; } else if ((subtype == VC_INPUT_TERMINAL) || (subtype == VC_OUTPUT_TERMINAL)) { /* Fields common to input and output terminals */ entity_id = tvb_get_guint8(tvb, offset); terminal_type = tvb_get_letohs(tvb, offset+1); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_terminal_id, tvb, offset, 1, ENC_LITTLE_ENDIAN); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_terminal_type, tvb, offset+1, 2, ENC_LITTLE_ENDIAN); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_assoc_terminal, tvb, offset+3, 1, ENC_LITTLE_ENDIAN); offset += 4; if (subtype == VC_OUTPUT_TERMINAL) { proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_src_id, tvb, offset, 1, ENC_LITTLE_ENDIAN); ++offset; } proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_iTerminal, tvb, offset, 1, ENC_LITTLE_ENDIAN); ++offset; if (subtype == VC_INPUT_TERMINAL) { if (terminal_type == ITT_CAMERA) { offset = dissect_usb_video_camera_terminal(tree, tvb, offset); } else if (terminal_type == ITT_MEDIA_TRANSPORT_INPUT) { /* @todo */ } } if (subtype == VC_OUTPUT_TERMINAL) { if (terminal_type == OTT_MEDIA_TRANSPORT_OUTPUT) { /* @todo */ } } } else { /* Field common to extension / processing / selector / encoding units */ entity_id = tvb_get_guint8(tvb, offset); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_unit_id, tvb, offset, 1, ENC_LITTLE_ENDIAN); ++offset; if (subtype == VC_PROCESSING_UNIT) { offset = dissect_usb_video_processing_unit(tree, tvb, offset); } else if (subtype == VC_SELECTOR_UNIT) { offset = dissect_usb_video_selector_unit(tree, tvb, offset); } else if (subtype == VC_EXTENSION_UNIT) { offset = dissect_usb_video_extension_unit(tree, tvb, offset); } else if (subtype == VC_ENCODING_UNIT) { /* @todo UVC 1.5 */ } else { expert_add_info_format(pinfo, subtype_item, &ei_usb_vid_subtype_unknown, "Unknown VC subtype %u", subtype); } } /* Soak up descriptor bytes beyond those we know how to 
dissect */ if (offset < descriptor_len) { proto_tree_add_item(tree, hf_usb_vid_descriptor_data, tvb, offset, descriptor_len-offset, ENC_NA); /* offset = descriptor_len; */ } if (entity_id != 0) proto_item_append_text(item, " (Entity %d)", entity_id); if (subtype != VC_HEADER && usb_conv_info) { /* Switch to the usb_conv_info of the Video Control interface */ usb_conv_info = get_usb_iface_conv_info(pinfo, usb_conv_info->interfaceNum); video_conv_info = (video_conv_info_t *)usb_conv_info->class_data; if (!video_conv_info) { video_conv_info = wmem_new(wmem_file_scope(), video_conv_info_t); video_conv_info->entities = wmem_tree_new(wmem_file_scope()); usb_conv_info->class_data = video_conv_info; } entity = (video_entity_t*) wmem_tree_lookup32(video_conv_info->entities, entity_id); if (!entity) { entity = wmem_new(wmem_file_scope(), video_entity_t); entity->entityID = entity_id; entity->subtype = subtype; entity->terminalType = terminal_type; wmem_tree_insert32(video_conv_info->entities, entity_id, entity); } } return descriptor_len; } Commit Message: Make class "type" for USB conversations. USB dissectors can't assume that only their class type has been passed around in the conversation. Make explicit check that class type expected matches the dissector and stop/prevent dissection if there isn't a match. Bug: 12356 Change-Id: Ib23973a4ebd0fbb51952ffc118daf95e3389a209 Reviewed-on: https://code.wireshark.org/review/15212 Petri-Dish: Michael Mann <[email protected]> Reviewed-by: Martin Kaiser <[email protected]> Petri-Dish: Martin Kaiser <[email protected]> Tested-by: Petri Dish Buildbot <[email protected]> Reviewed-by: Michael Mann <[email protected]> CWE ID: CWE-476
dissect_usb_video_control_interface_descriptor(proto_tree *parent_tree, tvbuff_t *tvb, guint8 descriptor_len, packet_info *pinfo, usb_conv_info_t *usb_conv_info) { video_conv_info_t *video_conv_info = NULL; video_entity_t *entity = NULL; proto_item *item = NULL; proto_item *subtype_item = NULL; proto_tree *tree = NULL; guint8 entity_id = 0; guint16 terminal_type = 0; int offset = 0; guint8 subtype; subtype = tvb_get_guint8(tvb, offset+2); if (parent_tree) { const gchar *subtype_str; subtype_str = val_to_str_ext(subtype, &vc_if_descriptor_subtypes_ext, "Unknown (0x%x)"); tree = proto_tree_add_subtree_format(parent_tree, tvb, offset, descriptor_len, ett_descriptor_video_control, &item, "VIDEO CONTROL INTERFACE DESCRIPTOR [%s]", subtype_str); } /* Common fields */ dissect_usb_descriptor_header(tree, tvb, offset, &vid_descriptor_type_vals_ext); subtype_item = proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_subtype, tvb, offset+2, 1, ENC_LITTLE_ENDIAN); offset += 3; if (subtype == VC_HEADER) { guint8 num_vs_interfaces; proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_bcdUVC, tvb, offset, 2, ENC_LITTLE_ENDIAN); proto_tree_add_item(tree, hf_usb_vid_ifdesc_wTotalLength, tvb, offset+2, 2, ENC_LITTLE_ENDIAN); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_dwClockFrequency, tvb, offset+4, 4, ENC_LITTLE_ENDIAN); num_vs_interfaces = tvb_get_guint8(tvb, offset+8); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_bInCollection, tvb, offset+8, 1, ENC_LITTLE_ENDIAN); if (num_vs_interfaces > 0) { proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_baInterfaceNr, tvb, offset+9, num_vs_interfaces, ENC_NA); } offset += 9 + num_vs_interfaces; } else if ((subtype == VC_INPUT_TERMINAL) || (subtype == VC_OUTPUT_TERMINAL)) { /* Fields common to input and output terminals */ entity_id = tvb_get_guint8(tvb, offset); terminal_type = tvb_get_letohs(tvb, offset+1); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_terminal_id, tvb, offset, 1, ENC_LITTLE_ENDIAN); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_terminal_type, tvb, offset+1, 2, ENC_LITTLE_ENDIAN); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_assoc_terminal, tvb, offset+3, 1, ENC_LITTLE_ENDIAN); offset += 4; if (subtype == VC_OUTPUT_TERMINAL) { proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_src_id, tvb, offset, 1, ENC_LITTLE_ENDIAN); ++offset; } proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_iTerminal, tvb, offset, 1, ENC_LITTLE_ENDIAN); ++offset; if (subtype == VC_INPUT_TERMINAL) { if (terminal_type == ITT_CAMERA) { offset = dissect_usb_video_camera_terminal(tree, tvb, offset); } else if (terminal_type == ITT_MEDIA_TRANSPORT_INPUT) { /* @todo */ } } if (subtype == VC_OUTPUT_TERMINAL) { if (terminal_type == OTT_MEDIA_TRANSPORT_OUTPUT) { /* @todo */ } } } else { /* Field common to extension / processing / selector / encoding units */ entity_id = tvb_get_guint8(tvb, offset); proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_unit_id, tvb, offset, 1, ENC_LITTLE_ENDIAN); ++offset; if (subtype == VC_PROCESSING_UNIT) { offset = dissect_usb_video_processing_unit(tree, tvb, offset); } else if (subtype == VC_SELECTOR_UNIT) { offset = dissect_usb_video_selector_unit(tree, tvb, offset); } else if (subtype == VC_EXTENSION_UNIT) { offset = dissect_usb_video_extension_unit(tree, tvb, offset); } else if (subtype == VC_ENCODING_UNIT) { /* @todo UVC 1.5 */ } else { expert_add_info_format(pinfo, subtype_item, &ei_usb_vid_subtype_unknown, "Unknown VC subtype %u", subtype); } } /* Soak up descriptor bytes beyond those we know how to 
dissect */ if (offset < descriptor_len) { proto_tree_add_item(tree, hf_usb_vid_descriptor_data, tvb, offset, descriptor_len-offset, ENC_NA); /* offset = descriptor_len; */ } if (entity_id != 0) proto_item_append_text(item, " (Entity %d)", entity_id); if (subtype != VC_HEADER && usb_conv_info) { /* Switch to the usb_conv_info of the Video Control interface */ usb_conv_info = get_usb_iface_conv_info(pinfo, usb_conv_info->interfaceNum); video_conv_info = (video_conv_info_t *)usb_conv_info->class_data; if (!video_conv_info) { video_conv_info = wmem_new(wmem_file_scope(), video_conv_info_t); video_conv_info->entities = wmem_tree_new(wmem_file_scope()); usb_conv_info->class_data = video_conv_info; usb_conv_info->class_data_type = USB_CONV_VIDEO; } else if (usb_conv_info->class_data_type != USB_CONV_VIDEO) { /* Stop dissection if another USB type is in the conversation */ return descriptor_len; } entity = (video_entity_t*) wmem_tree_lookup32(video_conv_info->entities, entity_id); if (!entity) { entity = wmem_new(wmem_file_scope(), video_entity_t); entity->entityID = entity_id; entity->subtype = subtype; entity->terminalType = terminal_type; wmem_tree_insert32(video_conv_info->entities, entity_id, entity); } } return descriptor_len; }
167,155
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport) { u64 seq; __u32 hash[4]; struct keydata *keyptr = get_keyptr(); hash[0] = (__force u32)saddr; hash[1] = (__force u32)daddr; hash[2] = ((__force u16)sport << 16) + (__force u16)dport; hash[3] = keyptr->secret[11]; seq = half_md4_transform(hash, keyptr->secret); seq |= ((u64)keyptr->count) << (32 - HASH_BITS); seq += ktime_to_ns(ktime_get_real()); seq &= (1ull << 48) - 1; return seq; } Commit Message: net: Compute protocol sequence numbers and fragment IDs using MD5. Computers have become a lot faster since we compromised on the partial MD4 hash which we use currently for performance reasons. MD5 is a much safer choice, and is inline with both RFC1948 and other ISS generators (OpenBSD, Solaris, etc.) Furthermore, only having 24-bits of the sequence number be truly unpredictable is a very serious limitation. So the periodic regeneration and 8-bit counter have been removed. We compute and use a full 32-bit sequence number. For ipv6, DCCP was found to use a 32-bit truncated initial sequence number (it needs 43-bits) and that is fixed here as well. Reported-by: Dan Kaminsky <[email protected]> Tested-by: Willy Tarreau <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID:
u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
165,763
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int iowarrior_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct iowarrior *dev = NULL; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int i; int retval = -ENOMEM; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL); if (dev == NULL) { dev_err(&interface->dev, "Out of memory\n"); return retval; } mutex_init(&dev->mutex); atomic_set(&dev->intr_idx, 0); atomic_set(&dev->read_idx, 0); spin_lock_init(&dev->intr_idx_lock); atomic_set(&dev->overflow_flag, 0); init_waitqueue_head(&dev->read_wait); atomic_set(&dev->write_busy, 0); init_waitqueue_head(&dev->write_wait); dev->udev = udev; dev->interface = interface; iface_desc = interface->cur_altsetting; dev->product_id = le16_to_cpu(udev->descriptor.idProduct); /* set up the endpoint information */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) dev->int_in_endpoint = endpoint; if (usb_endpoint_is_int_out(endpoint)) /* this one will match for the IOWarrior56 only */ dev->int_out_endpoint = endpoint; } /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56)) /* IOWarrior56 has wMaxPacketSize different from report size */ dev->report_size = 7; /* create the urb and buffer for reading */ dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->int_in_urb) { dev_err(&interface->dev, "Couldn't allocate interrupt_in_urb\n"); goto error; } dev->int_in_buffer = kmalloc(dev->report_size, GFP_KERNEL); if (!dev->int_in_buffer) { dev_err(&interface->dev, "Couldn't allocate int_in_buffer\n"); goto error; } usb_fill_int_urb(dev->int_in_urb, dev->udev, usb_rcvintpipe(dev->udev, dev->int_in_endpoint->bEndpointAddress), dev->int_in_buffer, dev->report_size, iowarrior_callback, dev, dev->int_in_endpoint->bInterval); /* create an internal buffer for interrupt data from the device */ dev->read_queue = kmalloc(((dev->report_size + 1) * MAX_INTERRUPT_BUFFER), GFP_KERNEL); if (!dev->read_queue) { dev_err(&interface->dev, "Couldn't allocate read_queue\n"); goto error; } /* Get the serial-number of the chip */ memset(dev->chip_serial, 0x00, sizeof(dev->chip_serial)); usb_string(udev, udev->descriptor.iSerialNumber, dev->chip_serial, sizeof(dev->chip_serial)); if (strlen(dev->chip_serial) != 8) memset(dev->chip_serial, 0x00, sizeof(dev->chip_serial)); /* Set the idle timeout to 0, if this is interface 0 */ if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) { usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0A, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } /* allow device read and ioctl */ dev->present = 1; /* we can register the device now, as it is ready */ usb_set_intfdata(interface, dev); retval = usb_register_dev(interface, &iowarrior_class); if (retval) { /* something prevented us from registering this driver */ dev_err(&interface->dev, "Not able to get a minor for this device.\n"); usb_set_intfdata(interface, NULL); goto error; } dev->minor = interface->minor; /* let the user know what node this device is now attached to */ dev_info(&interface->dev, "IOWarrior product=0x%x, serial=%s interface=%d " "now attached to 
iowarrior%d\n", dev->product_id, dev->chip_serial, iface_desc->desc.bInterfaceNumber, dev->minor - IOWARRIOR_MINOR_BASE); return retval; error: iowarrior_delete(dev); return retval; } Commit Message: USB: iowarrior: fix oops with malicious USB descriptors The iowarrior driver expects at least one valid endpoint. If given malicious descriptors that specify 0 for the number of endpoints, it will crash in the probe function. Ensure there is at least one endpoint on the interface before using it. The full report of this issue can be found here: http://seclists.org/bugtraq/2016/Mar/87 Reported-by: Ralf Spenneberg <[email protected]> Cc: stable <[email protected]> Signed-off-by: Josh Boyer <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]> CWE ID:
static int iowarrior_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct iowarrior *dev = NULL; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int i; int retval = -ENOMEM; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL); if (dev == NULL) { dev_err(&interface->dev, "Out of memory\n"); return retval; } mutex_init(&dev->mutex); atomic_set(&dev->intr_idx, 0); atomic_set(&dev->read_idx, 0); spin_lock_init(&dev->intr_idx_lock); atomic_set(&dev->overflow_flag, 0); init_waitqueue_head(&dev->read_wait); atomic_set(&dev->write_busy, 0); init_waitqueue_head(&dev->write_wait); dev->udev = udev; dev->interface = interface; iface_desc = interface->cur_altsetting; dev->product_id = le16_to_cpu(udev->descriptor.idProduct); if (iface_desc->desc.bNumEndpoints < 1) { dev_err(&interface->dev, "Invalid number of endpoints\n"); retval = -EINVAL; goto error; } /* set up the endpoint information */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) dev->int_in_endpoint = endpoint; if (usb_endpoint_is_int_out(endpoint)) /* this one will match for the IOWarrior56 only */ dev->int_out_endpoint = endpoint; } /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56)) /* IOWarrior56 has wMaxPacketSize different from report size */ dev->report_size = 7; /* create the urb and buffer for reading */ dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->int_in_urb) { dev_err(&interface->dev, "Couldn't allocate interrupt_in_urb\n"); goto error; } dev->int_in_buffer = kmalloc(dev->report_size, GFP_KERNEL); if (!dev->int_in_buffer) { dev_err(&interface->dev, "Couldn't allocate int_in_buffer\n"); goto error; } usb_fill_int_urb(dev->int_in_urb, dev->udev, usb_rcvintpipe(dev->udev, dev->int_in_endpoint->bEndpointAddress), dev->int_in_buffer, dev->report_size, iowarrior_callback, dev, dev->int_in_endpoint->bInterval); /* create an internal buffer for interrupt data from the device */ dev->read_queue = kmalloc(((dev->report_size + 1) * MAX_INTERRUPT_BUFFER), GFP_KERNEL); if (!dev->read_queue) { dev_err(&interface->dev, "Couldn't allocate read_queue\n"); goto error; } /* Get the serial-number of the chip */ memset(dev->chip_serial, 0x00, sizeof(dev->chip_serial)); usb_string(udev, udev->descriptor.iSerialNumber, dev->chip_serial, sizeof(dev->chip_serial)); if (strlen(dev->chip_serial) != 8) memset(dev->chip_serial, 0x00, sizeof(dev->chip_serial)); /* Set the idle timeout to 0, if this is interface 0 */ if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) { usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0A, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } /* allow device read and ioctl */ dev->present = 1; /* we can register the device now, as it is ready */ usb_set_intfdata(interface, dev); retval = usb_register_dev(interface, &iowarrior_class); if (retval) { /* something prevented us from registering this driver */ dev_err(&interface->dev, "Not able to get a minor for this device.\n"); usb_set_intfdata(interface, NULL); goto error; } dev->minor = interface->minor; /* let the user know what node this 
device is now attached to */ dev_info(&interface->dev, "IOWarrior product=0x%x, serial=%s interface=%d " "now attached to iowarrior%d\n", dev->product_id, dev->chip_serial, iface_desc->desc.bInterfaceNumber, dev->minor - IOWARRIOR_MINOR_BASE); return retval; error: iowarrior_delete(dev); return retval; }
167,430
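The fix above boils down to one rule: never dereference an endpoint descriptor before checking the endpoint count the (untrusted) device reported. Below is a minimal user-space C sketch of that "validate the count before indexing" pattern; the struct names and pick_int_in_endpoint() helper are hypothetical stand-ins, not the kernel driver's actual types.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for descriptor structures parsed from a device. */
struct endpoint_desc { unsigned char address; unsigned short max_packet; };
struct interface_desc { int num_endpoints; struct endpoint_desc *endpoints; };

/* Returns 0 on success, -1 if the descriptor cannot be trusted. */
static int pick_int_in_endpoint(const struct interface_desc *iface,
                                const struct endpoint_desc **out)
{
    /* The core of the fix: check the advertised count (and the pointer)
     * before touching endpoints[] at all. */
    if (iface == NULL || iface->num_endpoints < 1 || iface->endpoints == NULL)
        return -1;

    for (int i = 0; i < iface->num_endpoints; i++) {
        /* A real driver would test the endpoint type/direction here. */
        *out = &iface->endpoints[i];
        return 0;
    }
    return -1;
}

int main(void)
{
    struct interface_desc bogus = { 0, NULL };   /* malicious: zero endpoints */
    const struct endpoint_desc *ep = NULL;

    if (pick_int_in_endpoint(&bogus, &ep) != 0)
        printf("rejected interface with no endpoints\n");
    return 0;
}

With the check in place, a descriptor claiming zero endpoints is rejected in probe instead of leading to a NULL dereference later.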
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: uint8_t* output() const { return output_ + BorderTop() * kOuterBlockSize + BorderLeft(); } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
uint8_t *output() const { #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { return output_ + BorderTop() * kOuterBlockSize + BorderLeft(); } else { return CONVERT_TO_BYTEPTR(output16_ + BorderTop() * kOuterBlockSize + BorderLeft()); } #else return output_ + BorderTop() * kOuterBlockSize + BorderLeft(); #endif } uint8_t *output_ref() const { #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft(); } else { return CONVERT_TO_BYTEPTR(output16_ref_ + BorderTop() * kOuterBlockSize + BorderLeft()); } #else return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft(); #endif } uint16_t lookup(uint8_t *list, int index) const { #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { return list[index]; } else { return CONVERT_TO_SHORTPTR(list)[index]; } #else return list[index]; #endif } void assign_val(uint8_t *list, int index, uint16_t val) const { #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { list[index] = (uint8_t) val; } else { CONVERT_TO_SHORTPTR(list)[index] = val; } #else list[index] = (uint8_t) val; #endif } void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr, const unsigned int src_stride, const int16_t *HFilter, const int16_t *VFilter, uint8_t *dst_ptr, unsigned int dst_stride, unsigned int output_width, unsigned int output_height) { #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr, dst_stride, output_width, output_height); } else { highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride, HFilter, VFilter, CONVERT_TO_SHORTPTR(dst_ptr), dst_stride, output_width, output_height, UUT_->use_highbd_); } #else filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr, dst_stride, output_width, output_height); #endif } void wrapper_filter_block2d_8_c(const uint8_t *src_ptr, const unsigned int src_stride, const int16_t *HFilter, const int16_t *VFilter, uint8_t *dst_ptr, unsigned int dst_stride, unsigned int output_width, unsigned int output_height) { #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr, dst_stride, output_width, output_height); } else { highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride, HFilter, VFilter, CONVERT_TO_SHORTPTR(dst_ptr), dst_stride, output_width, output_height, UUT_->use_highbd_); } #else filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr, dst_stride, output_width, output_height); #endif }
174,511
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs) { u16 *op; int size; unicode_t u; op = pwcs; while (*s && len > 0) { if (*s & 0x80) { size = utf8_to_utf32(s, len, &u); if (size < 0) return -EINVAL; if (u >= PLANE_SIZE) { u -= PLANE_SIZE; *op++ = (wchar_t) (SURROGATE_PAIR | ((u >> 10) & SURROGATE_BITS)); *op++ = (wchar_t) (SURROGATE_PAIR | SURROGATE_LOW | (u & SURROGATE_BITS)); } else { *op++ = (wchar_t) u; } s += size; len -= size; } else { *op++ = *s++; len--; } } return op - pwcs; } Commit Message: NLS: improve UTF8 -> UTF16 string conversion routine The utf8s_to_utf16s conversion routine needs to be improved. Unlike its utf16s_to_utf8s sibling, it doesn't accept arguments specifying the maximum length of the output buffer or the endianness of its 16-bit output. This patch (as1501) adds the two missing arguments, and adjusts the only two places in the kernel where the function is called. A follow-on patch will add a third caller that does utilize the new capabilities. The two conversion routines are still annoyingly inconsistent in the way they handle invalid byte combinations. But that's a subject for a different patch. Signed-off-by: Alan Stern <[email protected]> CC: Clemens Ladisch <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]> CWE ID: CWE-119
static inline void put_utf16(wchar_t *s, unsigned c, enum utf16_endian endian) { switch (endian) { default: *s = (wchar_t) c; break; case UTF16_LITTLE_ENDIAN: *s = __cpu_to_le16(c); break; case UTF16_BIG_ENDIAN: *s = __cpu_to_be16(c); break; } } int utf8s_to_utf16s(const u8 *s, int len, enum utf16_endian endian, wchar_t *pwcs, int maxlen) { u16 *op; int size; unicode_t u; op = pwcs; while (len > 0 && maxlen > 0 && *s) { if (*s & 0x80) { size = utf8_to_utf32(s, len, &u); if (size < 0) return -EINVAL; s += size; len -= size; if (u >= PLANE_SIZE) { if (maxlen < 2) break; u -= PLANE_SIZE; put_utf16(op++, SURROGATE_PAIR | ((u >> 10) & SURROGATE_BITS), endian); put_utf16(op++, SURROGATE_PAIR | SURROGATE_LOW | (u & SURROGATE_BITS), endian); maxlen -= 2; } else { put_utf16(op++, u, endian); maxlen--; } } else { put_utf16(op++, *s++, endian); len--; maxlen--; } } return op - pwcs; }
166,125
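The commit adds two things the original conversion lacked: an explicit bound on the output buffer and an endianness parameter applied through a small put_utf16() helper. The C sketch below illustrates both ideas in plain user-space code; it uses byte shifts instead of the kernel's __cpu_to_le16/__cpu_to_be16 macros, and the emit_surrogate_pair() helper is a hypothetical name, not the kernel function.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum utf16_endian { UTF16_HOST, UTF16_LITTLE, UTF16_BIG };

/* Write one UTF-16 code unit into dst in the requested byte order. */
static void put_utf16(uint8_t dst[2], uint16_t c, enum utf16_endian endian)
{
    switch (endian) {
    case UTF16_LITTLE:
        dst[0] = (uint8_t)(c & 0xff);
        dst[1] = (uint8_t)(c >> 8);
        break;
    case UTF16_BIG:
        dst[0] = (uint8_t)(c >> 8);
        dst[1] = (uint8_t)(c & 0xff);
        break;
    default:
        memcpy(dst, &c, sizeof(c));   /* host order */
        break;
    }
}

/* Emit a supplementary-plane code point as a surrogate pair, but only if the
 * caller still has room for two units -- the output bound the old code lacked. */
static int emit_surrogate_pair(uint8_t *dst, int units_left, uint32_t cp,
                               enum utf16_endian endian)
{
    if (units_left < 2)
        return 0;                     /* refuse instead of overflowing */
    cp -= 0x10000;
    put_utf16(dst,     (uint16_t)(0xd800 | (cp >> 10)), endian);
    put_utf16(dst + 2, (uint16_t)(0xdc00 | (cp & 0x3ff)), endian);
    return 2;
}

int main(void)
{
    uint8_t buf[4];
    int n = emit_surrogate_pair(buf, 2, 0x1f600, UTF16_BIG);   /* fits */
    printf("wrote %d units: %02x %02x %02x %02x\n", n, buf[0], buf[1], buf[2], buf[3]);
    printf("wrote %d units when only 1 slot left\n",
           emit_surrogate_pair(buf, 1, 0x1f600, UTF16_BIG));   /* rejected */
    return 0;
}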
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void DrawingBuffer::RestoreAllState() { client_->DrawingBufferClientRestoreScissorTest(); client_->DrawingBufferClientRestoreMaskAndClearValues(); client_->DrawingBufferClientRestorePixelPackAlignment(); client_->DrawingBufferClientRestoreTexture2DBinding(); client_->DrawingBufferClientRestoreRenderbufferBinding(); client_->DrawingBufferClientRestoreFramebufferBinding(); client_->DrawingBufferClientRestorePixelUnpackBufferBinding(); } Commit Message: Reset ES3 pixel pack parameters and PIXEL_PACK_BUFFER binding in DrawingBuffer before ReadPixels() and recover them later. BUG=740603 TEST=new conformance test [email protected],[email protected] Change-Id: I3ea54c6cc34f34e249f7c8b9f792d93c5e1958f4 Reviewed-on: https://chromium-review.googlesource.com/570840 Reviewed-by: Antoine Labour <[email protected]> Reviewed-by: Kenneth Russell <[email protected]> Commit-Queue: Zhenyao Mo <[email protected]> Cr-Commit-Position: refs/heads/master@{#486518} CWE ID: CWE-119
void DrawingBuffer::RestoreAllState() { client_->DrawingBufferClientRestoreScissorTest(); client_->DrawingBufferClientRestoreMaskAndClearValues(); client_->DrawingBufferClientRestorePixelPackParameters(); client_->DrawingBufferClientRestoreTexture2DBinding(); client_->DrawingBufferClientRestoreRenderbufferBinding(); client_->DrawingBufferClientRestoreFramebufferBinding(); client_->DrawingBufferClientRestorePixelUnpackBufferBinding(); client_->DrawingBufferClientRestorePixelPackBufferBinding(); }
172,295
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { nsc_encode_argb_to_aycocg(context, bmpdata, rowstride); if (context->ChromaSubsamplingLevel) { nsc_encode_subsampling(context); } } Commit Message: Fixed CVE-2018-8788 Thanks to Eyal Itkin from Check Point Software Technologies. CWE ID: CWE-787
BOOL nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { if (!context || !bmpdata || (rowstride == 0)) return FALSE; if (!nsc_encode_argb_to_aycocg(context, bmpdata, rowstride)) return FALSE; if (context->ChromaSubsamplingLevel) { if (!nsc_encode_subsampling(context)) return FALSE; } return TRUE; }
169,287
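The change here turns a void encoder entry point into one that returns a status, rejects NULL or zero-stride inputs up front, and propagates failures from each stage instead of continuing on corrupt state. A minimal C sketch of that conversion follows; the encoder_ctx struct and encode_stage() function are hypothetical placeholders, not FreeRDP's NSC API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical encoder context standing in for a real codec context. */
struct encoder_ctx { int subsampling; };

static bool encode_stage(struct encoder_ctx *ctx, const uint8_t *data, uint32_t stride)
{
    (void)ctx; (void)data; (void)stride;
    return true;                      /* placeholder for real per-stage work */
}

/* Status-returning entry point: validate inputs, then bail out on any
 * stage failure rather than silently pressing on. */
static bool encode(struct encoder_ctx *ctx, const uint8_t *bitmap, uint32_t stride)
{
    if (ctx == NULL || bitmap == NULL || stride == 0)
        return false;                 /* reject malformed input up front */

    if (!encode_stage(ctx, bitmap, stride))
        return false;                 /* propagate stage failure */

    if (ctx->subsampling && !encode_stage(ctx, bitmap, stride))
        return false;

    return true;
}

int main(void)
{
    struct encoder_ctx ctx = { 1 };
    uint8_t pixel = 0;
    printf("valid call:  %s\n", encode(&ctx, &pixel, 4) ? "ok" : "rejected");
    printf("zero stride: %s\n", encode(&ctx, &pixel, 0) ? "ok" : "rejected");
    return 0;
}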
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int propagate_one(struct mount *m) { struct mount *child; int type; /* skip ones added by this propagate_mnt() */ if (IS_MNT_NEW(m)) return 0; /* skip if mountpoint isn't covered by it */ if (!is_subdir(mp->m_dentry, m->mnt.mnt_root)) return 0; if (peers(m, last_dest)) { type = CL_MAKE_SHARED; } else { struct mount *n, *p; bool done; for (n = m; ; n = p) { p = n->mnt_master; if (p == dest_master || IS_MNT_MARKED(p)) break; } do { struct mount *parent = last_source->mnt_parent; if (last_source == first_source) break; done = parent->mnt_master == p; if (done && peers(n, parent)) break; last_source = last_source->mnt_master; } while (!done); type = CL_SLAVE; /* beginning of peer group among the slaves? */ if (IS_MNT_SHARED(m)) type |= CL_MAKE_SHARED; } /* Notice when we are propagating across user namespaces */ if (m->mnt_ns->user_ns != user_ns) type |= CL_UNPRIVILEGED; child = copy_tree(last_source, last_source->mnt.mnt_root, type); if (IS_ERR(child)) return PTR_ERR(child); child->mnt.mnt_flags &= ~MNT_LOCKED; mnt_set_mountpoint(m, mp, child); last_dest = m; last_source = child; if (m->mnt_master != dest_master) { read_seqlock_excl(&mount_lock); SET_MNT_MARK(m->mnt_master); read_sequnlock_excl(&mount_lock); } hlist_add_head(&child->mnt_hash, list); return 0; } Commit Message: mnt: Add a per mount namespace limit on the number of mounts CAI Qian <[email protected]> pointed out that the semantics of shared subtrees make it possible to create an exponentially increasing number of mounts in a mount namespace. mkdir /tmp/1 /tmp/2 mount --make-rshared / for i in $(seq 1 20) ; do mount --bind /tmp/1 /tmp/2 ; done Will create create 2^20 or 1048576 mounts, which is a practical problem as some people have managed to hit this by accident. As such CVE-2016-6213 was assigned. Ian Kent <[email protected]> described the situation for autofs users as follows: > The number of mounts for direct mount maps is usually not very large because of > the way they are implemented, large direct mount maps can have performance > problems. There can be anywhere from a few (likely case a few hundred) to less > than 10000, plus mounts that have been triggered and not yet expired. > > Indirect mounts have one autofs mount at the root plus the number of mounts that > have been triggered and not yet expired. > > The number of autofs indirect map entries can range from a few to the common > case of several thousand and in rare cases up to between 30000 and 50000. I've > not heard of people with maps larger than 50000 entries. > > The larger the number of map entries the greater the possibility for a large > number of active mounts so it's not hard to expect cases of a 1000 or somewhat > more active mounts. So I am setting the default number of mounts allowed per mount namespace at 100,000. This is more than enough for any use case I know of, but small enough to quickly stop an exponential increase in mounts. Which should be perfect to catch misconfigurations and malfunctioning programs. For anyone who needs a higher limit this can be changed by writing to the new /proc/sys/fs/mount-max sysctl. Tested-by: CAI Qian <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]> CWE ID: CWE-400
static int propagate_one(struct mount *m) { struct mount *child; int type; /* skip ones added by this propagate_mnt() */ if (IS_MNT_NEW(m)) return 0; /* skip if mountpoint isn't covered by it */ if (!is_subdir(mp->m_dentry, m->mnt.mnt_root)) return 0; if (peers(m, last_dest)) { type = CL_MAKE_SHARED; } else { struct mount *n, *p; bool done; for (n = m; ; n = p) { p = n->mnt_master; if (p == dest_master || IS_MNT_MARKED(p)) break; } do { struct mount *parent = last_source->mnt_parent; if (last_source == first_source) break; done = parent->mnt_master == p; if (done && peers(n, parent)) break; last_source = last_source->mnt_master; } while (!done); type = CL_SLAVE; /* beginning of peer group among the slaves? */ if (IS_MNT_SHARED(m)) type |= CL_MAKE_SHARED; } /* Notice when we are propagating across user namespaces */ if (m->mnt_ns->user_ns != user_ns) type |= CL_UNPRIVILEGED; child = copy_tree(last_source, last_source->mnt.mnt_root, type); if (IS_ERR(child)) return PTR_ERR(child); child->mnt.mnt_flags &= ~MNT_LOCKED; mnt_set_mountpoint(m, mp, child); last_dest = m; last_source = child; if (m->mnt_master != dest_master) { read_seqlock_excl(&mount_lock); SET_MNT_MARK(m->mnt_master); read_sequnlock_excl(&mount_lock); } hlist_add_head(&child->mnt_hash, list); return count_mounts(m->mnt_ns, child); }
167,012
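The commit message describes the actual defence: count every mount charged to a namespace against a configurable ceiling (the new /proc/sys/fs/mount-max sysctl) so shared-subtree propagation cannot grow exponentially. The toy C sketch below shows the shape of such a counted-resource guard; the mnt_ns struct and charge_mounts() function are hypothetical, not the kernel's count_mounts().

#include <stdio.h>
#include <errno.h>

/* Toy stand-in for a mount namespace with a per-namespace budget. */
struct mnt_ns {
    unsigned int mounts;      /* current number of mounts */
    unsigned int max_mounts;  /* ceiling, cf. /proc/sys/fs/mount-max */
};

/* Charge 'extra' new mounts to the namespace, or fail with -ENOSPC.
 * The budget is checked before the new mounts are linked in. */
static int charge_mounts(struct mnt_ns *ns, unsigned int extra)
{
    if (ns->mounts + extra < ns->mounts)      /* unsigned overflow check */
        return -ENOSPC;
    if (ns->mounts + extra > ns->max_mounts)
        return -ENOSPC;
    ns->mounts += extra;
    return 0;
}

int main(void)
{
    struct mnt_ns ns = { 0, 100000 };
    printf("first charge: %d\n", charge_mounts(&ns, 1024));    /* 0 */
    printf("huge charge:  %d\n", charge_mounts(&ns, 200000));  /* -ENOSPC */
    return 0;
}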
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: spnego_gss_unwrap_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, int *conf_state, gss_qop_t *qop_state, gss_iov_buffer_desc *iov, int iov_count) { OM_uint32 ret; ret = gss_unwrap_iov(minor_status, context_handle, conf_state, qop_state, iov, iov_count); return (ret); } Commit Message: Fix SPNEGO context aliasing bugs [CVE-2015-2695] The SPNEGO mechanism currently replaces its context handle with the mechanism context handle upon establishment, under the assumption that most GSS functions are only called after context establishment. This assumption is incorrect, and can lead to aliasing violations for some programs. Maintain the SPNEGO context structure after context establishment and refer to it in all GSS methods. Add initiate and opened flags to the SPNEGO context structure for use in gss_inquire_context() prior to context establishment. CVE-2015-2695: In MIT krb5 1.5 and later, applications which call gss_inquire_context() on a partially-established SPNEGO context can cause the GSS-API library to read from a pointer using the wrong type, generally causing a process crash. This bug may go unnoticed, because the most common SPNEGO authentication scenario establishes the context after just one call to gss_accept_sec_context(). Java server applications using the native JGSS provider are vulnerable to this bug. A carefully crafted SPNEGO packet might allow the gss_inquire_context() call to succeed with attacker-determined results, but applications should not make access control decisions based on gss_inquire_context() results prior to context establishment. CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C [[email protected]: several bugfixes, style changes, and edge-case behavior changes; commit message and CVE description] ticket: 8244 target_version: 1.14 tags: pullup CWE ID: CWE-18
spnego_gss_unwrap_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, int *conf_state, gss_qop_t *qop_state, gss_iov_buffer_desc *iov, int iov_count) { OM_uint32 ret; spnego_gss_ctx_id_t sc = (spnego_gss_ctx_id_t)context_handle; if (sc->ctx_handle == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); ret = gss_unwrap_iov(minor_status, sc->ctx_handle, conf_state, qop_state, iov, iov_count); return (ret); }
166,668
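The underlying pattern of this fix is to stop aliasing the wrapper context with the mechanism context: every call goes through the SPNEGO structure and checks that the inner handle exists before delegating. A small C sketch of that wrapper-with-inner-handle pattern follows; the spnego_ctx/mech_ctx structs and status codes are hypothetical, not the MIT krb5 API.

#include <stdio.h>
#include <stddef.h>

struct mech_ctx { int dummy; };

/* Wrapper context; mech stays NULL until the inner context is established. */
struct spnego_ctx {
    struct mech_ctx *mech;
};

enum { STATUS_OK = 0, STATUS_NO_CONTEXT = 1 };

static int mech_unwrap(struct mech_ctx *ctx) { (void)ctx; return STATUS_OK; }

/* Never hand out the wrapper pointer as if it were the mech context;
 * check the inner handle, then delegate. */
static int spnego_unwrap(struct spnego_ctx *ctx)
{
    if (ctx == NULL || ctx->mech == NULL)
        return STATUS_NO_CONTEXT;
    return mech_unwrap(ctx->mech);
}

int main(void)
{
    struct spnego_ctx partial = { NULL };            /* not yet established */
    struct mech_ctx inner = { 0 };
    struct spnego_ctx established = { &inner };

    printf("partial ctx:     %d\n", spnego_unwrap(&partial));      /* 1 */
    printf("established ctx: %d\n", spnego_unwrap(&established));  /* 0 */
    return 0;
}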
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void HistogramsCallback() { MockHistogramsCallback(); QuitMessageLoop(); } Commit Message: Migrate ServiceProcessControl tests off of QuitCurrent*Deprecated(). Bug: 844016 Change-Id: I9403b850456c8ee06cd2539f7cec9599302e81a0 Reviewed-on: https://chromium-review.googlesource.com/1126576 Commit-Queue: Wez <[email protected]> Reviewed-by: Avi Drissman <[email protected]> Cr-Commit-Position: refs/heads/master@{#573131} CWE ID: CWE-94
void HistogramsCallback(base::RepeatingClosure on_done) { MockHistogramsCallback(); on_done.Run(); }
172,050
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void WebContentsImpl::DidFailProvisionalLoadWithError( RenderViewHost* render_view_host, const ViewHostMsg_DidFailProvisionalLoadWithError_Params& params) { VLOG(1) << "Failed Provisional Load: " << params.url.possibly_invalid_spec() << ", error_code: " << params.error_code << ", error_description: " << params.error_description << ", is_main_frame: " << params.is_main_frame << ", showing_repost_interstitial: " << params.showing_repost_interstitial << ", frame_id: " << params.frame_id; GURL validated_url(params.url); RenderProcessHost* render_process_host = render_view_host->GetProcess(); RenderViewHost::FilterURL(render_process_host, false, &validated_url); if (net::ERR_ABORTED == params.error_code) { if (ShowingInterstitialPage()) { LOG(WARNING) << "Discarding message during interstitial."; return; } render_manager_.RendererAbortedProvisionalLoad(render_view_host); } FOR_EACH_OBSERVER(WebContentsObserver, observers_, DidFailProvisionalLoad(params.frame_id, params.is_main_frame, validated_url, params.error_code, params.error_description, render_view_host)); } Commit Message: Delete unneeded pending entries in DidFailProvisionalLoad to prevent a spoof. BUG=280512 BUG=278899 TEST=See bug for repro steps. Review URL: https://chromiumcodereview.appspot.com/23978003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@222146 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
void WebContentsImpl::DidFailProvisionalLoadWithError( RenderViewHost* render_view_host, const ViewHostMsg_DidFailProvisionalLoadWithError_Params& params) { VLOG(1) << "Failed Provisional Load: " << params.url.possibly_invalid_spec() << ", error_code: " << params.error_code << ", error_description: " << params.error_description << ", is_main_frame: " << params.is_main_frame << ", showing_repost_interstitial: " << params.showing_repost_interstitial << ", frame_id: " << params.frame_id; GURL validated_url(params.url); RenderProcessHost* render_process_host = render_view_host->GetProcess(); RenderViewHost::FilterURL(render_process_host, false, &validated_url); if (net::ERR_ABORTED == params.error_code) { if (ShowingInterstitialPage()) { LOG(WARNING) << "Discarding message during interstitial."; return; } render_manager_.RendererAbortedProvisionalLoad(render_view_host); } // Do not usually clear the pending entry if one exists, so that the user's // typed URL is not lost when a navigation fails or is aborted. However, in // cases that we don't show the pending entry (e.g., renderer-initiated // navigations in an existing tab), we don't keep it around. That prevents // spoofs on in-page navigations that don't go through // DidStartProvisionalLoadForFrame. // In general, we allow the view to clear the pending entry and typed URL if // the user requests (e.g., hitting Escape with focus in the address bar). // Note: don't touch the transient entry, since an interstitial may exist. if (controller_.GetPendingEntry() != controller_.GetVisibleEntry()) controller_.DiscardPendingEntry(); FOR_EACH_OBSERVER(WebContentsObserver, observers_, DidFailProvisionalLoad(params.frame_id, params.is_main_frame, validated_url, params.error_code, params.error_description, render_view_host)); }
171,189
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: v8::Local<v8::Object> V8InjectedScriptHost::create(v8::Local<v8::Context> context, V8InspectorImpl* inspector) { v8::Isolate* isolate = inspector->isolate(); v8::Local<v8::Object> injectedScriptHost = v8::Object::New(isolate); v8::Local<v8::External> debuggerExternal = v8::External::New(isolate, inspector); setFunctionProperty(context, injectedScriptHost, "internalConstructorName", V8InjectedScriptHost::internalConstructorNameCallback, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "formatAccessorsAsProperties", V8InjectedScriptHost::formatAccessorsAsProperties, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "subtype", V8InjectedScriptHost::subtypeCallback, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "getInternalProperties", V8InjectedScriptHost::getInternalPropertiesCallback, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "objectHasOwnProperty", V8InjectedScriptHost::objectHasOwnPropertyCallback, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "bind", V8InjectedScriptHost::bindCallback, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "proxyTargetValue", V8InjectedScriptHost::proxyTargetValueCallback, debuggerExternal); return injectedScriptHost; } Commit Message: [DevTools] Copy objects from debugger context to inspected context properly. BUG=637594 Review-Url: https://codereview.chromium.org/2253643002 Cr-Commit-Position: refs/heads/master@{#412436} CWE ID: CWE-79
v8::Local<v8::Object> V8InjectedScriptHost::create(v8::Local<v8::Context> context, V8InspectorImpl* inspector) { v8::Isolate* isolate = inspector->isolate(); v8::Local<v8::Object> injectedScriptHost = v8::Object::New(isolate); bool success = injectedScriptHost->SetPrototype(context, v8::Null(isolate)).FromMaybe(false); DCHECK(success); v8::Local<v8::External> debuggerExternal = v8::External::New(isolate, inspector); setFunctionProperty(context, injectedScriptHost, "internalConstructorName", V8InjectedScriptHost::internalConstructorNameCallback, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "formatAccessorsAsProperties", V8InjectedScriptHost::formatAccessorsAsProperties, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "subtype", V8InjectedScriptHost::subtypeCallback, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "getInternalProperties", V8InjectedScriptHost::getInternalPropertiesCallback, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "objectHasOwnProperty", V8InjectedScriptHost::objectHasOwnPropertyCallback, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "bind", V8InjectedScriptHost::bindCallback, debuggerExternal); setFunctionProperty(context, injectedScriptHost, "proxyTargetValue", V8InjectedScriptHost::proxyTargetValueCallback, debuggerExternal); return injectedScriptHost; }
172,069
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static __init int sctp_init(void) { int i; int status = -EINVAL; unsigned long goal; unsigned long limit; int max_share; int order; sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); /* Allocate bind_bucket and chunk caches. */ status = -ENOBUFS; sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", sizeof(struct sctp_bind_bucket), 0, SLAB_HWCACHE_ALIGN, NULL); if (!sctp_bucket_cachep) goto out; sctp_chunk_cachep = kmem_cache_create("sctp_chunk", sizeof(struct sctp_chunk), 0, SLAB_HWCACHE_ALIGN, NULL); if (!sctp_chunk_cachep) goto err_chunk_cachep; status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL); if (status) goto err_percpu_counter_init; /* Implementation specific variables. */ /* Initialize default stream count setup information. */ sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; /* Initialize handle used for association ids. */ idr_init(&sctp_assocs_id); limit = nr_free_buffer_pages() / 8; limit = max(limit, 128UL); sysctl_sctp_mem[0] = limit / 4 * 3; sysctl_sctp_mem[1] = limit; sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; /* Set per-socket limits to no more than 1/128 the pressure threshold*/ limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); max_share = min(4UL*1024*1024, limit); sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */ sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1); sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); sysctl_sctp_wmem[0] = SK_MEM_QUANTUM; sysctl_sctp_wmem[1] = 16*1024; sysctl_sctp_wmem[2] = max(64*1024, max_share); /* Size and allocate the association hash table. * The methodology is similar to that of the tcp hash tables. */ if (totalram_pages >= (128 * 1024)) goal = totalram_pages >> (22 - PAGE_SHIFT); else goal = totalram_pages >> (24 - PAGE_SHIFT); for (order = 0; (1UL << order) < goal; order++) ; do { sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE / sizeof(struct sctp_hashbucket); if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0) continue; sctp_assoc_hashtable = (struct sctp_hashbucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_assoc_hashtable && --order > 0); if (!sctp_assoc_hashtable) { pr_err("Failed association hash alloc\n"); status = -ENOMEM; goto err_ahash_alloc; } for (i = 0; i < sctp_assoc_hashsize; i++) { rwlock_init(&sctp_assoc_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain); } /* Allocate and initialize the endpoint hash table. */ sctp_ep_hashsize = 64; sctp_ep_hashtable = kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL); if (!sctp_ep_hashtable) { pr_err("Failed endpoint_hash alloc\n"); status = -ENOMEM; goto err_ehash_alloc; } for (i = 0; i < sctp_ep_hashsize; i++) { rwlock_init(&sctp_ep_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain); } /* Allocate and initialize the SCTP port hash table. 
*/ do { sctp_port_hashsize = (1UL << order) * PAGE_SIZE / sizeof(struct sctp_bind_hashbucket); if ((sctp_port_hashsize > (64 * 1024)) && order > 0) continue; sctp_port_hashtable = (struct sctp_bind_hashbucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_port_hashtable && --order > 0); if (!sctp_port_hashtable) { pr_err("Failed bind hash alloc\n"); status = -ENOMEM; goto err_bhash_alloc; } for (i = 0; i < sctp_port_hashsize; i++) { spin_lock_init(&sctp_port_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain); } pr_info("Hash tables configured (established %d bind %d)\n", sctp_assoc_hashsize, sctp_port_hashsize); sctp_sysctl_register(); INIT_LIST_HEAD(&sctp_address_families); sctp_v4_pf_init(); sctp_v6_pf_init(); status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; status = sctp_v6_protosw_init(); if (status) goto err_v6_protosw_init; status = register_pernet_subsys(&sctp_net_ops); if (status) goto err_register_pernet_subsys; status = sctp_v4_add_protocol(); if (status) goto err_add_protocol; /* Register SCTP with inet6 layer. */ status = sctp_v6_add_protocol(); if (status) goto err_v6_add_protocol; out: return status; err_v6_add_protocol: sctp_v4_del_protocol(); err_add_protocol: unregister_pernet_subsys(&sctp_net_ops); err_register_pernet_subsys: sctp_v6_protosw_exit(); err_v6_protosw_init: sctp_v4_protosw_exit(); err_protosw_init: sctp_v4_pf_exit(); sctp_v6_pf_exit(); sctp_sysctl_unregister(); free_pages((unsigned long)sctp_port_hashtable, get_order(sctp_port_hashsize * sizeof(struct sctp_bind_hashbucket))); err_bhash_alloc: kfree(sctp_ep_hashtable); err_ehash_alloc: free_pages((unsigned long)sctp_assoc_hashtable, get_order(sctp_assoc_hashsize * sizeof(struct sctp_hashbucket))); err_ahash_alloc: percpu_counter_destroy(&sctp_sockets_allocated); err_percpu_counter_init: kmem_cache_destroy(sctp_chunk_cachep); err_chunk_cachep: kmem_cache_destroy(sctp_bucket_cachep); goto out; } Commit Message: sctp: fix race on protocol/netns initialization Consider sctp module is unloaded and is being requested because an user is creating a sctp socket. During initialization, sctp will add the new protocol type and then initialize pernet subsys: status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; status = sctp_v6_protosw_init(); if (status) goto err_v6_protosw_init; status = register_pernet_subsys(&sctp_net_ops); The problem is that after those calls to sctp_v{4,6}_protosw_init(), it is possible for userspace to create SCTP sockets like if the module is already fully loaded. If that happens, one of the possible effects is that we will have readers for net->sctp.local_addr_list list earlier than expected and sctp_net_init() does not take precautions while dealing with that list, leading to a potential panic but not limited to that, as sctp_sock_init() will copy a bunch of blank/partially initialized values from net->sctp. The race happens like this: CPU 0 | CPU 1 socket() | __sock_create | socket() inet_create | __sock_create list_for_each_entry_rcu( | answer, &inetsw[sock->type], | list) { | inet_create /* no hits */ | if (unlikely(err)) { | ... 
| request_module() | /* socket creation is blocked | * the module is fully loaded | */ | sctp_init | sctp_v4_protosw_init | inet_register_protosw | list_add_rcu(&p->list, | last_perm); | | list_for_each_entry_rcu( | answer, &inetsw[sock->type], sctp_v6_protosw_init | list) { | /* hit, so assumes protocol | * is already loaded | */ | /* socket creation continues | * before netns is initialized | */ register_pernet_subsys | Simply inverting the initialization order between register_pernet_subsys() and sctp_v4_protosw_init() is not possible because register_pernet_subsys() will create a control sctp socket, so the protocol must be already visible by then. Deferring the socket creation to a work-queue is not good specially because we loose the ability to handle its errors. So, as suggested by Vlad, the fix is to split netns initialization in two moments: defaults and control socket, so that the defaults are already loaded by when we register the protocol, while control socket initialization is kept at the same moment it is today. Fixes: 4db67e808640 ("sctp: Make the address lists per network namespace") Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: Marcelo Ricardo Leitner <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-119
static __init int sctp_init(void) { int i; int status = -EINVAL; unsigned long goal; unsigned long limit; int max_share; int order; sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); /* Allocate bind_bucket and chunk caches. */ status = -ENOBUFS; sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", sizeof(struct sctp_bind_bucket), 0, SLAB_HWCACHE_ALIGN, NULL); if (!sctp_bucket_cachep) goto out; sctp_chunk_cachep = kmem_cache_create("sctp_chunk", sizeof(struct sctp_chunk), 0, SLAB_HWCACHE_ALIGN, NULL); if (!sctp_chunk_cachep) goto err_chunk_cachep; status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL); if (status) goto err_percpu_counter_init; /* Implementation specific variables. */ /* Initialize default stream count setup information. */ sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; /* Initialize handle used for association ids. */ idr_init(&sctp_assocs_id); limit = nr_free_buffer_pages() / 8; limit = max(limit, 128UL); sysctl_sctp_mem[0] = limit / 4 * 3; sysctl_sctp_mem[1] = limit; sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; /* Set per-socket limits to no more than 1/128 the pressure threshold*/ limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); max_share = min(4UL*1024*1024, limit); sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */ sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1); sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); sysctl_sctp_wmem[0] = SK_MEM_QUANTUM; sysctl_sctp_wmem[1] = 16*1024; sysctl_sctp_wmem[2] = max(64*1024, max_share); /* Size and allocate the association hash table. * The methodology is similar to that of the tcp hash tables. */ if (totalram_pages >= (128 * 1024)) goal = totalram_pages >> (22 - PAGE_SHIFT); else goal = totalram_pages >> (24 - PAGE_SHIFT); for (order = 0; (1UL << order) < goal; order++) ; do { sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE / sizeof(struct sctp_hashbucket); if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0) continue; sctp_assoc_hashtable = (struct sctp_hashbucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_assoc_hashtable && --order > 0); if (!sctp_assoc_hashtable) { pr_err("Failed association hash alloc\n"); status = -ENOMEM; goto err_ahash_alloc; } for (i = 0; i < sctp_assoc_hashsize; i++) { rwlock_init(&sctp_assoc_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain); } /* Allocate and initialize the endpoint hash table. */ sctp_ep_hashsize = 64; sctp_ep_hashtable = kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL); if (!sctp_ep_hashtable) { pr_err("Failed endpoint_hash alloc\n"); status = -ENOMEM; goto err_ehash_alloc; } for (i = 0; i < sctp_ep_hashsize; i++) { rwlock_init(&sctp_ep_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain); } /* Allocate and initialize the SCTP port hash table. 
*/ do { sctp_port_hashsize = (1UL << order) * PAGE_SIZE / sizeof(struct sctp_bind_hashbucket); if ((sctp_port_hashsize > (64 * 1024)) && order > 0) continue; sctp_port_hashtable = (struct sctp_bind_hashbucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); } while (!sctp_port_hashtable && --order > 0); if (!sctp_port_hashtable) { pr_err("Failed bind hash alloc\n"); status = -ENOMEM; goto err_bhash_alloc; } for (i = 0; i < sctp_port_hashsize; i++) { spin_lock_init(&sctp_port_hashtable[i].lock); INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain); } pr_info("Hash tables configured (established %d bind %d)\n", sctp_assoc_hashsize, sctp_port_hashsize); sctp_sysctl_register(); INIT_LIST_HEAD(&sctp_address_families); sctp_v4_pf_init(); sctp_v6_pf_init(); status = register_pernet_subsys(&sctp_defaults_ops); if (status) goto err_register_defaults; status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; status = sctp_v6_protosw_init(); if (status) goto err_v6_protosw_init; status = register_pernet_subsys(&sctp_ctrlsock_ops); if (status) goto err_register_ctrlsock; status = sctp_v4_add_protocol(); if (status) goto err_add_protocol; /* Register SCTP with inet6 layer. */ status = sctp_v6_add_protocol(); if (status) goto err_v6_add_protocol; out: return status; err_v6_add_protocol: sctp_v4_del_protocol(); err_add_protocol: unregister_pernet_subsys(&sctp_ctrlsock_ops); err_register_ctrlsock: sctp_v6_protosw_exit(); err_v6_protosw_init: sctp_v4_protosw_exit(); err_protosw_init: unregister_pernet_subsys(&sctp_defaults_ops); err_register_defaults: sctp_v4_pf_exit(); sctp_v6_pf_exit(); sctp_sysctl_unregister(); free_pages((unsigned long)sctp_port_hashtable, get_order(sctp_port_hashsize * sizeof(struct sctp_bind_hashbucket))); err_bhash_alloc: kfree(sctp_ep_hashtable); err_ehash_alloc: free_pages((unsigned long)sctp_assoc_hashtable, get_order(sctp_assoc_hashsize * sizeof(struct sctp_hashbucket))); err_ahash_alloc: percpu_counter_destroy(&sctp_sockets_allocated); err_percpu_counter_init: kmem_cache_destroy(sctp_chunk_cachep); err_chunk_cachep: kmem_cache_destroy(sctp_bucket_cachep); goto out; }
166,606
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: int snd_card_new(struct device *parent, int idx, const char *xid, struct module *module, int extra_size, struct snd_card **card_ret) { struct snd_card *card; int err; if (snd_BUG_ON(!card_ret)) return -EINVAL; *card_ret = NULL; if (extra_size < 0) extra_size = 0; card = kzalloc(sizeof(*card) + extra_size, GFP_KERNEL); if (!card) return -ENOMEM; if (extra_size > 0) card->private_data = (char *)card + sizeof(struct snd_card); if (xid) strlcpy(card->id, xid, sizeof(card->id)); err = 0; mutex_lock(&snd_card_mutex); if (idx < 0) /* first check the matching module-name slot */ idx = get_slot_from_bitmask(idx, module_slot_match, module); if (idx < 0) /* if not matched, assign an empty slot */ idx = get_slot_from_bitmask(idx, check_empty_slot, module); if (idx < 0) err = -ENODEV; else if (idx < snd_ecards_limit) { if (test_bit(idx, snd_cards_lock)) err = -EBUSY; /* invalid */ } else if (idx >= SNDRV_CARDS) err = -ENODEV; if (err < 0) { mutex_unlock(&snd_card_mutex); dev_err(parent, "cannot find the slot for index %d (range 0-%i), error: %d\n", idx, snd_ecards_limit - 1, err); kfree(card); return err; } set_bit(idx, snd_cards_lock); /* lock it */ if (idx >= snd_ecards_limit) snd_ecards_limit = idx + 1; /* increase the limit */ mutex_unlock(&snd_card_mutex); card->dev = parent; card->number = idx; card->module = module; INIT_LIST_HEAD(&card->devices); init_rwsem(&card->controls_rwsem); rwlock_init(&card->ctl_files_rwlock); INIT_LIST_HEAD(&card->controls); INIT_LIST_HEAD(&card->ctl_files); spin_lock_init(&card->files_lock); INIT_LIST_HEAD(&card->files_list); #ifdef CONFIG_PM mutex_init(&card->power_lock); init_waitqueue_head(&card->power_sleep); #endif device_initialize(&card->card_dev); card->card_dev.parent = parent; card->card_dev.class = sound_class; card->card_dev.release = release_card_device; card->card_dev.groups = card_dev_attr_groups; err = kobject_set_name(&card->card_dev.kobj, "card%d", idx); if (err < 0) goto __error; /* the control interface cannot be accessed from the user space until */ /* snd_cards_bitmask and snd_cards are set with snd_card_register */ err = snd_ctl_create(card); if (err < 0) { dev_err(parent, "unable to register control minors\n"); goto __error; } err = snd_info_card_create(card); if (err < 0) { dev_err(parent, "unable to create card info\n"); goto __error_ctl; } *card_ret = card; return 0; __error_ctl: snd_device_free_all(card); __error: put_device(&card->card_dev); return err; } Commit Message: ALSA: control: Protect user controls against concurrent access The user-control put and get handlers as well as the tlv do not protect against concurrent access from multiple threads. Since the state of the control is not updated atomically it is possible that either two write operations or a write and a read operation race against each other. Both can lead to arbitrary memory disclosure. This patch introduces a new lock that protects user-controls from concurrent access. Since applications typically access controls sequentially than in parallel a single lock per card should be fine. Signed-off-by: Lars-Peter Clausen <[email protected]> Acked-by: Jaroslav Kysela <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]> CWE ID: CWE-362
int snd_card_new(struct device *parent, int idx, const char *xid, struct module *module, int extra_size, struct snd_card **card_ret) { struct snd_card *card; int err; if (snd_BUG_ON(!card_ret)) return -EINVAL; *card_ret = NULL; if (extra_size < 0) extra_size = 0; card = kzalloc(sizeof(*card) + extra_size, GFP_KERNEL); if (!card) return -ENOMEM; if (extra_size > 0) card->private_data = (char *)card + sizeof(struct snd_card); if (xid) strlcpy(card->id, xid, sizeof(card->id)); err = 0; mutex_lock(&snd_card_mutex); if (idx < 0) /* first check the matching module-name slot */ idx = get_slot_from_bitmask(idx, module_slot_match, module); if (idx < 0) /* if not matched, assign an empty slot */ idx = get_slot_from_bitmask(idx, check_empty_slot, module); if (idx < 0) err = -ENODEV; else if (idx < snd_ecards_limit) { if (test_bit(idx, snd_cards_lock)) err = -EBUSY; /* invalid */ } else if (idx >= SNDRV_CARDS) err = -ENODEV; if (err < 0) { mutex_unlock(&snd_card_mutex); dev_err(parent, "cannot find the slot for index %d (range 0-%i), error: %d\n", idx, snd_ecards_limit - 1, err); kfree(card); return err; } set_bit(idx, snd_cards_lock); /* lock it */ if (idx >= snd_ecards_limit) snd_ecards_limit = idx + 1; /* increase the limit */ mutex_unlock(&snd_card_mutex); card->dev = parent; card->number = idx; card->module = module; INIT_LIST_HEAD(&card->devices); init_rwsem(&card->controls_rwsem); rwlock_init(&card->ctl_files_rwlock); mutex_init(&card->user_ctl_lock); INIT_LIST_HEAD(&card->controls); INIT_LIST_HEAD(&card->ctl_files); spin_lock_init(&card->files_lock); INIT_LIST_HEAD(&card->files_list); #ifdef CONFIG_PM mutex_init(&card->power_lock); init_waitqueue_head(&card->power_sleep); #endif device_initialize(&card->card_dev); card->card_dev.parent = parent; card->card_dev.class = sound_class; card->card_dev.release = release_card_device; card->card_dev.groups = card_dev_attr_groups; err = kobject_set_name(&card->card_dev.kobj, "card%d", idx); if (err < 0) goto __error; /* the control interface cannot be accessed from the user space until */ /* snd_cards_bitmask and snd_cards are set with snd_card_register */ err = snd_ctl_create(card); if (err < 0) { dev_err(parent, "unable to register control minors\n"); goto __error; } err = snd_info_card_create(card); if (err < 0) { dev_err(parent, "unable to create card info\n"); goto __error_ctl; } *card_ret = card; return 0; __error_ctl: snd_device_free_all(card); __error: put_device(&card->card_dev); return err; }
166,300
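The fix adds a dedicated per-card mutex (user_ctl_lock) so that concurrent get/put/TLV operations on user controls cannot race and disclose memory. The pthread sketch below shows the same per-object lock pattern in user-space C; the user_ctl struct and ctl_get/ctl_put helpers are hypothetical names, not ALSA's API.

#include <pthread.h>
#include <stdio.h>

/* Toy stand-in for a user-defined control whose value and length must be
 * read and written consistently with respect to each other. */
struct user_ctl {
    pthread_mutex_t lock;   /* cf. card->user_ctl_lock in the fix */
    int value;
    int length;
};

static void ctl_put(struct user_ctl *ctl, int value, int length)
{
    pthread_mutex_lock(&ctl->lock);
    ctl->value = value;     /* both fields updated in one critical section */
    ctl->length = length;
    pthread_mutex_unlock(&ctl->lock);
}

static void ctl_get(struct user_ctl *ctl, int *value, int *length)
{
    pthread_mutex_lock(&ctl->lock);
    *value = ctl->value;    /* readers see a consistent pair, never a torn update */
    *length = ctl->length;
    pthread_mutex_unlock(&ctl->lock);
}

int main(void)
{
    struct user_ctl ctl = { .value = 0, .length = 0 };
    int v, l;
    pthread_mutex_init(&ctl.lock, NULL);
    ctl_put(&ctl, 42, 4);
    ctl_get(&ctl, &v, &l);
    printf("value=%d length=%d\n", v, l);
    pthread_mutex_destroy(&ctl.lock);
    return 0;
}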
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ProfileDependencyManager::AssertFactoriesBuilt() { if (built_factories_) return; #if defined(ENABLE_BACKGROUND) BackgroundContentsServiceFactory::GetInstance(); #endif BookmarkModelFactory::GetInstance(); #if defined(ENABLE_CAPTIVE_PORTAL_DETECTION) captive_portal::CaptivePortalServiceFactory::GetInstance(); #endif ChromeURLDataManagerFactory::GetInstance(); #if defined(ENABLE_PRINTING) CloudPrintProxyServiceFactory::GetInstance(); #endif CookieSettings::Factory::GetInstance(); #if defined(ENABLE_NOTIFICATIONS) DesktopNotificationServiceFactory::GetInstance(); #endif DownloadServiceFactory::GetInstance(); #if defined(ENABLE_EXTENSIONS) extensions::AppRestoreServiceFactory::GetInstance(); extensions::BluetoothAPIFactory::GetInstance(); extensions::CommandServiceFactory::GetInstance(); extensions::CookiesAPIFactory::GetInstance(); extensions::ExtensionSystemFactory::GetInstance(); extensions::FontSettingsAPIFactory::GetInstance(); extensions::IdleManagerFactory::GetInstance(); extensions::ManagedModeAPIFactory::GetInstance(); extensions::ProcessesAPIFactory::GetInstance(); extensions::SuggestedLinksRegistryFactory::GetInstance(); extensions::TabCaptureRegistryFactory::GetInstance(); extensions::WebNavigationAPIFactory::GetInstance(); ExtensionManagementAPIFactory::GetInstance(); #endif FaviconServiceFactory::GetInstance(); FindBarStateFactory::GetInstance(); #if defined(USE_AURA) GesturePrefsObserverFactoryAura::GetInstance(); #endif GlobalErrorServiceFactory::GetInstance(); GoogleURLTrackerFactory::GetInstance(); HistoryServiceFactory::GetInstance(); MediaGalleriesPreferencesFactory::GetInstance(); NTPResourceCacheFactory::GetInstance(); PasswordStoreFactory::GetInstance(); PersonalDataManagerFactory::GetInstance(); #if !defined(OS_ANDROID) PinnedTabServiceFactory::GetInstance(); #endif PluginPrefsFactory::GetInstance(); #if defined(ENABLE_CONFIGURATION_POLICY) && !defined(OS_CHROMEOS) policy::UserPolicySigninServiceFactory::GetInstance(); #endif predictors::AutocompleteActionPredictorFactory::GetInstance(); predictors::PredictorDatabaseFactory::GetInstance(); predictors::ResourcePrefetchPredictorFactory::GetInstance(); prerender::PrerenderManagerFactory::GetInstance(); prerender::PrerenderLinkManagerFactory::GetInstance(); ProfileSyncServiceFactory::GetInstance(); ProtocolHandlerRegistryFactory::GetInstance(); #if defined(ENABLE_PROTECTOR_SERVICE) protector::ProtectorServiceFactory::GetInstance(); #endif #if defined(ENABLE_SESSION_SERVICE) SessionServiceFactory::GetInstance(); #endif ShortcutsBackendFactory::GetInstance(); ThumbnailServiceFactory::GetInstance(); SigninManagerFactory::GetInstance(); #if defined(ENABLE_INPUT_SPEECH) SpeechInputExtensionManager::InitializeFactory(); ChromeSpeechRecognitionPreferences::InitializeFactory(); #endif SpellcheckServiceFactory::GetInstance(); TabRestoreServiceFactory::GetInstance(); TemplateURLFetcherFactory::GetInstance(); TemplateURLServiceFactory::GetInstance(); #if defined(ENABLE_THEMES) ThemeServiceFactory::GetInstance(); #endif TokenServiceFactory::GetInstance(); UserStyleSheetWatcherFactory::GetInstance(); VisitedLinkMasterFactory::GetInstance(); WebDataServiceFactory::GetInstance(); #if defined(ENABLE_WEB_INTENTS) WebIntentsRegistryFactory::GetInstance(); #endif built_factories_ = true; } Commit Message: DIAL (Discovery and Launch protocol) extension API skeleton. This implements the skeleton for a new Chrome extension API for local device discovery. The API will first be restricted to whitelisted extensions only. 
The API will allow extensions to receive events from a DIAL service running within Chrome which notifies of devices being discovered on the local network. Spec available here: https://docs.google.com/a/google.com/document/d/14FI-VKWrsMG7pIy3trgM3ybnKS-o5TULkt8itiBNXlQ/edit BUG=163288 [email protected] Review URL: https://chromiumcodereview.appspot.com/11444020 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@172243 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-119
void ProfileDependencyManager::AssertFactoriesBuilt() { if (built_factories_) return; #if defined(ENABLE_BACKGROUND) BackgroundContentsServiceFactory::GetInstance(); #endif BookmarkModelFactory::GetInstance(); #if defined(ENABLE_CAPTIVE_PORTAL_DETECTION) captive_portal::CaptivePortalServiceFactory::GetInstance(); #endif ChromeURLDataManagerFactory::GetInstance(); #if defined(ENABLE_PRINTING) CloudPrintProxyServiceFactory::GetInstance(); #endif CookieSettings::Factory::GetInstance(); #if defined(ENABLE_NOTIFICATIONS) DesktopNotificationServiceFactory::GetInstance(); #endif DownloadServiceFactory::GetInstance(); #if defined(ENABLE_EXTENSIONS) extensions::AppRestoreServiceFactory::GetInstance(); extensions::BluetoothAPIFactory::GetInstance(); extensions::CommandServiceFactory::GetInstance(); extensions::CookiesAPIFactory::GetInstance(); extensions::DialAPIFactory::GetInstance(); extensions::ExtensionSystemFactory::GetInstance(); extensions::FontSettingsAPIFactory::GetInstance(); extensions::IdleManagerFactory::GetInstance(); extensions::ManagedModeAPIFactory::GetInstance(); extensions::ProcessesAPIFactory::GetInstance(); extensions::SuggestedLinksRegistryFactory::GetInstance(); extensions::TabCaptureRegistryFactory::GetInstance(); extensions::WebNavigationAPIFactory::GetInstance(); ExtensionManagementAPIFactory::GetInstance(); #endif FaviconServiceFactory::GetInstance(); FindBarStateFactory::GetInstance(); #if defined(USE_AURA) GesturePrefsObserverFactoryAura::GetInstance(); #endif GlobalErrorServiceFactory::GetInstance(); GoogleURLTrackerFactory::GetInstance(); HistoryServiceFactory::GetInstance(); MediaGalleriesPreferencesFactory::GetInstance(); NTPResourceCacheFactory::GetInstance(); PasswordStoreFactory::GetInstance(); PersonalDataManagerFactory::GetInstance(); #if !defined(OS_ANDROID) PinnedTabServiceFactory::GetInstance(); #endif PluginPrefsFactory::GetInstance(); #if defined(ENABLE_CONFIGURATION_POLICY) && !defined(OS_CHROMEOS) policy::UserPolicySigninServiceFactory::GetInstance(); #endif predictors::AutocompleteActionPredictorFactory::GetInstance(); predictors::PredictorDatabaseFactory::GetInstance(); predictors::ResourcePrefetchPredictorFactory::GetInstance(); prerender::PrerenderManagerFactory::GetInstance(); prerender::PrerenderLinkManagerFactory::GetInstance(); ProfileSyncServiceFactory::GetInstance(); ProtocolHandlerRegistryFactory::GetInstance(); #if defined(ENABLE_PROTECTOR_SERVICE) protector::ProtectorServiceFactory::GetInstance(); #endif #if defined(ENABLE_SESSION_SERVICE) SessionServiceFactory::GetInstance(); #endif ShortcutsBackendFactory::GetInstance(); ThumbnailServiceFactory::GetInstance(); SigninManagerFactory::GetInstance(); #if defined(ENABLE_INPUT_SPEECH) SpeechInputExtensionManager::InitializeFactory(); ChromeSpeechRecognitionPreferences::InitializeFactory(); #endif SpellcheckServiceFactory::GetInstance(); TabRestoreServiceFactory::GetInstance(); TemplateURLFetcherFactory::GetInstance(); TemplateURLServiceFactory::GetInstance(); #if defined(ENABLE_THEMES) ThemeServiceFactory::GetInstance(); #endif TokenServiceFactory::GetInstance(); UserStyleSheetWatcherFactory::GetInstance(); VisitedLinkMasterFactory::GetInstance(); WebDataServiceFactory::GetInstance(); #if defined(ENABLE_WEB_INTENTS) WebIntentsRegistryFactory::GetInstance(); #endif built_factories_ = true; }
171,339
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void AwContents::ScrollContainerViewTo(gfx::Vector2d new_value) { DCHECK_CURRENTLY_ON(BrowserThread::UI); JNIEnv* env = AttachCurrentThread(); ScopedJavaLocalRef<jobject> obj = java_ref_.get(env); if (obj.is_null()) return; Java_AwContents_scrollContainerViewTo( env, obj.obj(), new_value.x(), new_value.y()); } Commit Message: sync compositor: pass simple gfx types by const ref See bug for reasoning BUG=159273 Review URL: https://codereview.chromium.org/1417893006 Cr-Commit-Position: refs/heads/master@{#356653} CWE ID: CWE-399
void AwContents::ScrollContainerViewTo(const gfx::Vector2d& new_value) { DCHECK_CURRENTLY_ON(BrowserThread::UI); JNIEnv* env = AttachCurrentThread(); ScopedJavaLocalRef<jobject> obj = java_ref_.get(env); if (obj.is_null()) return; Java_AwContents_scrollContainerViewTo( env, obj.obj(), new_value.x(), new_value.y()); }
171,617
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void TabStrip::ChangeTabGroup(int model_index, base::Optional<int> old_group, base::Optional<int> new_group) { if (new_group.has_value() && !group_headers_[new_group.value()]) { const TabGroupData* group_data = controller_->GetDataForGroup(new_group.value()); auto header = std::make_unique<TabGroupHeader>(group_data->title()); header->set_owned_by_client(); AddChildView(header.get()); group_headers_[new_group.value()] = std::move(header); } if (old_group.has_value() && controller_->ListTabsInGroup(old_group.value()).size() == 0) { group_headers_.erase(old_group.value()); } UpdateIdealBounds(); AnimateToIdealBounds(); } Commit Message: Paint tab groups with the group color. * The background of TabGroupHeader now uses the group color. * The backgrounds of tabs in the group are tinted with the group color. This treatment, along with the colors chosen, are intended to be a placeholder. Bug: 905491 Change-Id: Ic808548f8eba23064606e7fb8c9bba281d0d117f Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1610504 Commit-Queue: Bret Sepulveda <[email protected]> Reviewed-by: Taylor Bergquist <[email protected]> Cr-Commit-Position: refs/heads/master@{#660498} CWE ID: CWE-20
void TabStrip::ChangeTabGroup(int model_index, base::Optional<int> old_group, base::Optional<int> new_group) { tab_at(model_index)->SetGroup(new_group); if (new_group.has_value() && !group_headers_[new_group.value()]) { auto header = std::make_unique<TabGroupHeader>(this, new_group.value()); header->set_owned_by_client(); AddChildView(header.get()); group_headers_[new_group.value()] = std::move(header); } if (old_group.has_value() && controller_->ListTabsInGroup(old_group.value()).size() == 0) { group_headers_.erase(old_group.value()); } UpdateIdealBounds(); AnimateToIdealBounds(); }
172,520
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: rdpsnddbg_process(STREAM s) { unsigned int pkglen; static char *rest = NULL; char *buf; pkglen = s->end - s->p; /* str_handle_lines requires null terminated strings */ buf = (char *) xmalloc(pkglen + 1); STRNCPY(buf, (char *) s->p, pkglen + 1); str_handle_lines(buf, &rest, rdpsnddbg_line_handler, NULL); xfree(buf); } Commit Message: Malicious RDP server security fixes This commit includes fixes for a set of 21 vulnerabilities in rdesktop when a malicious RDP server is used. All vulnerabilities was identified and reported by Eyal Itkin. * Add rdp_protocol_error function that is used in several fixes * Refactor of process_bitmap_updates * Fix possible integer overflow in s_check_rem() on 32bit arch * Fix memory corruption in process_bitmap_data - CVE-2018-8794 * Fix remote code execution in process_bitmap_data - CVE-2018-8795 * Fix remote code execution in process_plane - CVE-2018-8797 * Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175 * Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175 * Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176 * Fix Denial of Service in sec_recv - CVE-2018-20176 * Fix minor information leak in rdpdr_process - CVE-2018-8791 * Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792 * Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793 * Fix Denial of Service in process_bitmap_data - CVE-2018-8796 * Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798 * Fix Denial of Service in process_secondary_order - CVE-2018-8799 * Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800 * Fix major information leak in ui_clip_handle_data - CVE-2018-20174 * Fix memory corruption in rdp_in_unistr - CVE-2018-20177 * Fix Denial of Service in process_demand_active - CVE-2018-20178 * Fix remote code execution in lspci_process - CVE-2018-20179 * Fix remote code execution in rdpsnddbg_process - CVE-2018-20180 * Fix remote code execution in seamless_process - CVE-2018-20181 * Fix remote code execution in seamless_process_line - CVE-2018-20182 CWE ID: CWE-119
rdpsnddbg_process(STREAM s) { unsigned int pkglen; static char *rest = NULL; char *buf; if (!s_check(s)) { rdp_protocol_error("rdpsnddbg_process(), stream is in unstable state", s); } pkglen = s->end - s->p; /* str_handle_lines requires null terminated strings */ buf = (char *) xmalloc(pkglen + 1); STRNCPY(buf, (char *) s->p, pkglen + 1); str_handle_lines(buf, &rest, rdpsnddbg_line_handler, NULL); xfree(buf); }
169,807
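The rdesktop fix above adds an s_check() guard before deriving the payload length from the stream pointers. A generic sketch of that validate-before-read pattern, assuming nothing about the real STREAM type beyond a begin/end cursor pair (all names here are illustrative):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Minimal stand-in for a parser cursor over a received packet.
struct Stream {
  const uint8_t* p;    // current read position
  const uint8_t* end;  // one past the last valid byte
};

// True only if the cursor is consistent and at least n bytes remain.
static bool stream_has(const Stream& s, size_t n) {
  return s.p <= s.end && static_cast<size_t>(s.end - s.p) >= n;
}

// Copy the remaining payload into a NUL-terminated buffer, refusing to
// proceed if the stream is inconsistent or the destination is too small.
static bool copy_payload(const Stream& s, char* out, size_t out_size) {
  if (s.p > s.end)
    return false;  // corrupted cursor, the condition s_check() rejects
  size_t len = static_cast<size_t>(s.end - s.p);
  if (len + 1 > out_size)
    return false;  // would overflow the destination
  std::memcpy(out, s.p, len);
  out[len] = '\0';
  return true;
}

int main() {
  const uint8_t packet[] = {'h', 'i'};
  Stream s{packet, packet + sizeof(packet)};
  char buf[16];
  if (stream_has(s, 2) && copy_payload(s, buf, sizeof(buf)))
    std::printf("payload: %s\n", buf);
  return 0;
}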
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int __poke_user_compat(struct task_struct *child, addr_t addr, addr_t data) { struct compat_user *dummy32 = NULL; __u32 tmp = (__u32) data; addr_t offset; if (addr < (addr_t) &dummy32->regs.acrs) { struct pt_regs *regs = task_pt_regs(child); /* * psw, gprs, acrs and orig_gpr2 are stored on the stack */ if (addr == (addr_t) &dummy32->regs.psw.mask) { __u32 mask = PSW32_MASK_USER; mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; /* Build a 64 bit psw mask from 31 bit mask. */ if ((tmp & ~mask) != PSW32_USER_BITS) /* Invalid psw mask. */ return -EINVAL; regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | (regs->psw.mask & PSW_MASK_BA) | (__u64)(tmp & mask) << 32; } else if (addr == (addr_t) &dummy32->regs.psw.addr) { /* Build a 64 bit psw address from 31 bit address. */ regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; /* Transfer 31 bit amode bit to psw mask. */ regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | (__u64)(tmp & PSW32_ADDR_AMODE); } else { /* gpr 0-15 */ *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; } } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy32->regs.acrs; *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { /* * orig_gpr2 is stored on the kernel stack */ *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; } else if (addr < (addr_t) &dummy32->regs.fp_regs) { /* * prevent writess of padding hole between * orig_gpr2 and fp_regs on s390. */ return 0; } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && test_fp_ctl(tmp)) return -EINVAL; offset = addr - (addr_t) &dummy32->regs.fp_regs; *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* * Handle access to the per_info structure. */ addr -= (addr_t) &dummy32->regs.per_info; __poke_user_per_compat(child, addr, data); } return 0; } Commit Message: s390/ptrace: fix PSW mask check The PSW mask check of the PTRACE_POKEUSR_AREA command is incorrect. The PSW_MASK_USER define contains the PSW_MASK_ASC bits, the ptrace interface accepts all combinations for the address-space-control bits. To protect the kernel space the PSW mask check in ptrace needs to reject the address-space-control bit combination for home space. Fixes CVE-2014-3534 Cc: [email protected] Signed-off-by: Martin Schwidefsky <[email protected]> CWE ID: CWE-264
static int __poke_user_compat(struct task_struct *child, addr_t addr, addr_t data) { struct compat_user *dummy32 = NULL; __u32 tmp = (__u32) data; addr_t offset; if (addr < (addr_t) &dummy32->regs.acrs) { struct pt_regs *regs = task_pt_regs(child); /* * psw, gprs, acrs and orig_gpr2 are stored on the stack */ if (addr == (addr_t) &dummy32->regs.psw.mask) { __u32 mask = PSW32_MASK_USER; mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; /* Build a 64 bit psw mask from 31 bit mask. */ if ((tmp ^ PSW32_USER_BITS) & ~mask) /* Invalid psw mask. */ return -EINVAL; if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) /* Invalid address-space-control bits */ return -EINVAL; regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | (regs->psw.mask & PSW_MASK_BA) | (__u64)(tmp & mask) << 32; } else if (addr == (addr_t) &dummy32->regs.psw.addr) { /* Build a 64 bit psw address from 31 bit address. */ regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; /* Transfer 31 bit amode bit to psw mask. */ regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | (__u64)(tmp & PSW32_ADDR_AMODE); } else { /* gpr 0-15 */ *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; } } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy32->regs.acrs; *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { /* * orig_gpr2 is stored on the kernel stack */ *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; } else if (addr < (addr_t) &dummy32->regs.fp_regs) { /* * prevent writess of padding hole between * orig_gpr2 and fp_regs on s390. */ return 0; } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && test_fp_ctl(tmp)) return -EINVAL; offset = addr - (addr_t) &dummy32->regs.fp_regs; *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* * Handle access to the per_info structure. */ addr -= (addr_t) &dummy32->regs.per_info; __poke_user_per_compat(child, addr, data); } return 0; }
166,363
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void UserSelectionScreen::FillUserMojoStruct( const user_manager::User* user, bool is_owner, bool is_signin_to_add, proximity_auth::mojom::AuthType auth_type, const std::vector<std::string>* public_session_recommended_locales, ash::mojom::LoginUserInfo* user_info) { user_info->basic_user_info = ash::mojom::UserInfo::New(); user_info->basic_user_info->type = user->GetType(); user_info->basic_user_info->account_id = user->GetAccountId(); user_info->basic_user_info->display_name = base::UTF16ToUTF8(user->GetDisplayName()); user_info->basic_user_info->display_email = user->display_email(); user_info->basic_user_info->avatar = BuildMojoUserAvatarForUser(user); user_info->auth_type = auth_type; user_info->is_signed_in = user->is_logged_in(); user_info->is_device_owner = is_owner; user_info->can_remove = CanRemoveUser(user); user_info->allow_fingerprint_unlock = AllowFingerprintForUser(user); if (!is_signin_to_add) { user_info->is_multiprofile_allowed = true; } else { GetMultiProfilePolicy(user, &user_info->is_multiprofile_allowed, &user_info->multiprofile_policy); } if (user->GetType() == user_manager::USER_TYPE_PUBLIC_ACCOUNT) { user_info->public_account_info = ash::mojom::PublicAccountInfo::New(); std::string domain; if (GetEnterpriseDomain(&domain)) user_info->public_account_info->enterprise_domain = domain; std::string selected_locale; bool has_multiple_locales; std::unique_ptr<base::ListValue> available_locales = GetPublicSessionLocales(public_session_recommended_locales, &selected_locale, &has_multiple_locales); DCHECK(available_locales); user_info->public_account_info->available_locales = lock_screen_utils::FromListValueToLocaleItem( std::move(available_locales)); user_info->public_account_info->default_locale = selected_locale; user_info->public_account_info->show_advanced_view = has_multiple_locales; } } Commit Message: cros: Check initial auth type when showing views login. Bug: 859611 Change-Id: I0298db9bbf4aed6bd40600aef2e1c5794e8cd058 Reviewed-on: https://chromium-review.googlesource.com/1123056 Reviewed-by: Xiaoyin Hu <[email protected]> Commit-Queue: Jacob Dufault <[email protected]> Cr-Commit-Position: refs/heads/master@{#572224} CWE ID:
void UserSelectionScreen::FillUserMojoStruct(
172,201
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void RenderThreadImpl::EnsureWebKitInitialized() { if (webkit_platform_support_.get()) return; v8::V8::SetCounterFunction(base::StatsTable::FindLocation); v8::V8::SetCreateHistogramFunction(CreateHistogram); v8::V8::SetAddHistogramSampleFunction(AddHistogramSample); webkit_platform_support_.reset(new RendererWebKitPlatformSupportImpl); WebKit::initialize(webkit_platform_support_.get()); if (CommandLine::ForCurrentProcess()->HasSwitch( switches::kEnableThreadedCompositing)) { compositor_thread_.reset(new CompositorThread(this)); AddFilter(compositor_thread_->GetMessageFilter()); WebKit::WebCompositor::initialize(compositor_thread_->GetWebThread()); } else WebKit::WebCompositor::initialize(NULL); compositor_initialized_ = true; WebScriptController::enableV8SingleThreadMode(); const CommandLine& command_line = *CommandLine::ForCurrentProcess(); webkit_glue::EnableWebCoreLogChannels( command_line.GetSwitchValueASCII(switches::kWebCoreLogChannels)); if (command_line.HasSwitch(switches::kPlaybackMode) || command_line.HasSwitch(switches::kRecordMode) || command_line.HasSwitch(switches::kNoJsRandomness)) { RegisterExtension(extensions_v8::PlaybackExtension::Get()); } if (CommandLine::ForCurrentProcess()->HasSwitch( switches::kDomAutomationController)) { base::StringPiece extension = content::GetContentClient()->GetDataResource( IDR_DOM_AUTOMATION_JS); RegisterExtension(new v8::Extension( "dom_automation.js", extension.data(), 0, NULL, extension.size())); } web_database_observer_impl_.reset( new WebDatabaseObserverImpl(sync_message_filter())); WebKit::WebDatabase::setObserver(web_database_observer_impl_.get()); WebRuntimeFeatures::enableSockets( !command_line.HasSwitch(switches::kDisableWebSockets)); WebRuntimeFeatures::enableDatabase( !command_line.HasSwitch(switches::kDisableDatabases)); WebRuntimeFeatures::enableDataTransferItems( !command_line.HasSwitch(switches::kDisableDataTransferItems)); WebRuntimeFeatures::enableApplicationCache( !command_line.HasSwitch(switches::kDisableApplicationCache)); WebRuntimeFeatures::enableNotifications( !command_line.HasSwitch(switches::kDisableDesktopNotifications)); WebRuntimeFeatures::enableLocalStorage( !command_line.HasSwitch(switches::kDisableLocalStorage)); WebRuntimeFeatures::enableSessionStorage( !command_line.HasSwitch(switches::kDisableSessionStorage)); WebRuntimeFeatures::enableIndexedDatabase(true); WebRuntimeFeatures::enableGeolocation( !command_line.HasSwitch(switches::kDisableGeolocation)); WebKit::WebRuntimeFeatures::enableMediaSource( command_line.HasSwitch(switches::kEnableMediaSource)); WebRuntimeFeatures::enableMediaPlayer( media::IsMediaLibraryInitialized()); WebKit::WebRuntimeFeatures::enableMediaStream( command_line.HasSwitch(switches::kEnableMediaStream)); WebKit::WebRuntimeFeatures::enableFullScreenAPI( !command_line.HasSwitch(switches::kDisableFullScreen)); WebKit::WebRuntimeFeatures::enablePointerLock( command_line.HasSwitch(switches::kEnablePointerLock)); WebKit::WebRuntimeFeatures::enableVideoTrack( command_line.HasSwitch(switches::kEnableVideoTrack)); #if defined(OS_CHROMEOS) WebRuntimeFeatures::enableWebAudio(false); #else WebRuntimeFeatures::enableWebAudio( !command_line.HasSwitch(switches::kDisableWebAudio)); #endif WebRuntimeFeatures::enablePushState(true); WebRuntimeFeatures::enableTouch( command_line.HasSwitch(switches::kEnableTouchEvents)); WebRuntimeFeatures::enableDeviceMotion( command_line.HasSwitch(switches::kEnableDeviceMotion)); WebRuntimeFeatures::enableDeviceOrientation( 
!command_line.HasSwitch(switches::kDisableDeviceOrientation)); WebRuntimeFeatures::enableSpeechInput( !command_line.HasSwitch(switches::kDisableSpeechInput)); WebRuntimeFeatures::enableScriptedSpeech( command_line.HasSwitch(switches::kEnableScriptedSpeech)); WebRuntimeFeatures::enableFileSystem( !command_line.HasSwitch(switches::kDisableFileSystem)); WebRuntimeFeatures::enableJavaScriptI18NAPI( !command_line.HasSwitch(switches::kDisableJavaScriptI18NAPI)); WebRuntimeFeatures::enableGamepad( command_line.HasSwitch(switches::kEnableGamepad)); WebRuntimeFeatures::enableQuota(true); WebRuntimeFeatures::enableShadowDOM( command_line.HasSwitch(switches::kEnableShadowDOM)); WebRuntimeFeatures::enableStyleScoped( command_line.HasSwitch(switches::kEnableStyleScoped)); FOR_EACH_OBSERVER(RenderProcessObserver, observers_, WebKitInitialized()); if (content::GetContentClient()->renderer()-> RunIdleHandlerWhenWidgetsHidden()) { ScheduleIdleHandler(kLongIdleHandlerDelayMs); } } Commit Message: Use a new scheme for swapping out RenderViews. BUG=118664 TEST=none Review URL: http://codereview.chromium.org/9720004 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@127986 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
void RenderThreadImpl::EnsureWebKitInitialized() { if (webkit_platform_support_.get()) return; v8::V8::SetCounterFunction(base::StatsTable::FindLocation); v8::V8::SetCreateHistogramFunction(CreateHistogram); v8::V8::SetAddHistogramSampleFunction(AddHistogramSample); webkit_platform_support_.reset(new RendererWebKitPlatformSupportImpl); WebKit::initialize(webkit_platform_support_.get()); if (CommandLine::ForCurrentProcess()->HasSwitch( switches::kEnableThreadedCompositing)) { compositor_thread_.reset(new CompositorThread(this)); AddFilter(compositor_thread_->GetMessageFilter()); WebKit::WebCompositor::initialize(compositor_thread_->GetWebThread()); } else WebKit::WebCompositor::initialize(NULL); compositor_initialized_ = true; WebScriptController::enableV8SingleThreadMode(); RenderThreadImpl::RegisterSchemes(); const CommandLine& command_line = *CommandLine::ForCurrentProcess(); webkit_glue::EnableWebCoreLogChannels( command_line.GetSwitchValueASCII(switches::kWebCoreLogChannels)); if (command_line.HasSwitch(switches::kPlaybackMode) || command_line.HasSwitch(switches::kRecordMode) || command_line.HasSwitch(switches::kNoJsRandomness)) { RegisterExtension(extensions_v8::PlaybackExtension::Get()); } if (CommandLine::ForCurrentProcess()->HasSwitch( switches::kDomAutomationController)) { base::StringPiece extension = content::GetContentClient()->GetDataResource( IDR_DOM_AUTOMATION_JS); RegisterExtension(new v8::Extension( "dom_automation.js", extension.data(), 0, NULL, extension.size())); } web_database_observer_impl_.reset( new WebDatabaseObserverImpl(sync_message_filter())); WebKit::WebDatabase::setObserver(web_database_observer_impl_.get()); WebRuntimeFeatures::enableSockets( !command_line.HasSwitch(switches::kDisableWebSockets)); WebRuntimeFeatures::enableDatabase( !command_line.HasSwitch(switches::kDisableDatabases)); WebRuntimeFeatures::enableDataTransferItems( !command_line.HasSwitch(switches::kDisableDataTransferItems)); WebRuntimeFeatures::enableApplicationCache( !command_line.HasSwitch(switches::kDisableApplicationCache)); WebRuntimeFeatures::enableNotifications( !command_line.HasSwitch(switches::kDisableDesktopNotifications)); WebRuntimeFeatures::enableLocalStorage( !command_line.HasSwitch(switches::kDisableLocalStorage)); WebRuntimeFeatures::enableSessionStorage( !command_line.HasSwitch(switches::kDisableSessionStorage)); WebRuntimeFeatures::enableIndexedDatabase(true); WebRuntimeFeatures::enableGeolocation( !command_line.HasSwitch(switches::kDisableGeolocation)); WebKit::WebRuntimeFeatures::enableMediaSource( command_line.HasSwitch(switches::kEnableMediaSource)); WebRuntimeFeatures::enableMediaPlayer( media::IsMediaLibraryInitialized()); WebKit::WebRuntimeFeatures::enableMediaStream( command_line.HasSwitch(switches::kEnableMediaStream)); WebKit::WebRuntimeFeatures::enableFullScreenAPI( !command_line.HasSwitch(switches::kDisableFullScreen)); WebKit::WebRuntimeFeatures::enablePointerLock( command_line.HasSwitch(switches::kEnablePointerLock)); WebKit::WebRuntimeFeatures::enableVideoTrack( command_line.HasSwitch(switches::kEnableVideoTrack)); #if defined(OS_CHROMEOS) WebRuntimeFeatures::enableWebAudio(false); #else WebRuntimeFeatures::enableWebAudio( !command_line.HasSwitch(switches::kDisableWebAudio)); #endif WebRuntimeFeatures::enablePushState(true); WebRuntimeFeatures::enableTouch( command_line.HasSwitch(switches::kEnableTouchEvents)); WebRuntimeFeatures::enableDeviceMotion( command_line.HasSwitch(switches::kEnableDeviceMotion)); WebRuntimeFeatures::enableDeviceOrientation( 
!command_line.HasSwitch(switches::kDisableDeviceOrientation)); WebRuntimeFeatures::enableSpeechInput( !command_line.HasSwitch(switches::kDisableSpeechInput)); WebRuntimeFeatures::enableScriptedSpeech( command_line.HasSwitch(switches::kEnableScriptedSpeech)); WebRuntimeFeatures::enableFileSystem( !command_line.HasSwitch(switches::kDisableFileSystem)); WebRuntimeFeatures::enableJavaScriptI18NAPI( !command_line.HasSwitch(switches::kDisableJavaScriptI18NAPI)); WebRuntimeFeatures::enableGamepad( command_line.HasSwitch(switches::kEnableGamepad)); WebRuntimeFeatures::enableQuota(true); WebRuntimeFeatures::enableShadowDOM( command_line.HasSwitch(switches::kEnableShadowDOM)); WebRuntimeFeatures::enableStyleScoped( command_line.HasSwitch(switches::kEnableStyleScoped)); FOR_EACH_OBSERVER(RenderProcessObserver, observers_, WebKitInitialized()); if (content::GetContentClient()->renderer()-> RunIdleHandlerWhenWidgetsHidden()) { ScheduleIdleHandler(kLongIdleHandlerDelayMs); } }
171,030
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: lookup_bytestring(netdissect_options *ndo, register const u_char *bs, const unsigned int nlen) { struct enamemem *tp; register u_int i, j, k; if (nlen >= 6) { k = (bs[0] << 8) | bs[1]; j = (bs[2] << 8) | bs[3]; i = (bs[4] << 8) | bs[5]; } else if (nlen >= 4) { k = (bs[0] << 8) | bs[1]; j = (bs[2] << 8) | bs[3]; i = 0; } else i = j = k = 0; tp = &bytestringtable[(i ^ j) & (HASHNAMESIZE-1)]; while (tp->e_nxt) if (tp->e_addr0 == i && tp->e_addr1 == j && tp->e_addr2 == k && memcmp((const char *)bs, (const char *)(tp->e_bs), nlen) == 0) return tp; else tp = tp->e_nxt; tp->e_addr0 = i; tp->e_addr1 = j; tp->e_addr2 = k; tp->e_bs = (u_char *) calloc(1, nlen + 1); if (tp->e_bs == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); memcpy(tp->e_bs, bs, nlen); tp->e_nxt = (struct enamemem *)calloc(1, sizeof(*tp)); if (tp->e_nxt == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); return tp; } Commit Message: CVE-2017-12894/In lookup_bytestring(), take the length of the byte string into account. Otherwise, if, in our search of the hash table, we come across a byte string that's shorter than the string we're looking for, we'll search past the end of the string in the hash table. This fixes a buffer over-read discovered by Forcepoint's security researchers Otto Airamo & Antti Levomäki. Add a test using the capture file supplied by the reporter(s). CWE ID: CWE-125
lookup_bytestring(netdissect_options *ndo, register const u_char *bs, const unsigned int nlen) { struct bsnamemem *tp; register u_int i, j, k; if (nlen >= 6) { k = (bs[0] << 8) | bs[1]; j = (bs[2] << 8) | bs[3]; i = (bs[4] << 8) | bs[5]; } else if (nlen >= 4) { k = (bs[0] << 8) | bs[1]; j = (bs[2] << 8) | bs[3]; i = 0; } else i = j = k = 0; tp = &bytestringtable[(i ^ j) & (HASHNAMESIZE-1)]; while (tp->bs_nxt) if (nlen == tp->bs_nbytes && tp->bs_addr0 == i && tp->bs_addr1 == j && tp->bs_addr2 == k && memcmp((const char *)bs, (const char *)(tp->bs_bytes), nlen) == 0) return tp; else tp = tp->bs_nxt; tp->bs_addr0 = i; tp->bs_addr1 = j; tp->bs_addr2 = k; tp->bs_bytes = (u_char *) calloc(1, nlen + 1); if (tp->bs_bytes == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); memcpy(tp->bs_bytes, bs, nlen); tp->bs_nbytes = nlen; tp->bs_nxt = (struct bsnamemem *)calloc(1, sizeof(*tp)); if (tp->bs_nxt == NULL) (*ndo->ndo_error)(ndo, "lookup_bytestring: calloc"); return tp; }
167,960
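The tcpdump fix above adds a length comparison before memcmp so that a shorter stored entry can never be read past its end while probing the hash chain. A standalone sketch of that lookup guard, with an illustrative Entry type standing in for the real bsnamemem struct:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// One hash-chain entry: the stored key carries its own length.
struct Entry {
  std::vector<uint8_t> bytes;
};

// Vulnerable shape: memcmp(entry, key, key_len) on an entry shorter than
// key_len reads beyond the stored buffer. Fixed shape: reject on length
// mismatch first, then compare byte-for-byte.
static bool entry_matches(const Entry& e, const uint8_t* key, size_t key_len) {
  if (e.bytes.size() != key_len)  // the length check the fix introduces
    return false;
  return std::memcmp(e.bytes.data(), key, key_len) == 0;
}

int main() {
  Entry stored{{0xde, 0xad}};
  const uint8_t query[] = {0xde, 0xad, 0xbe, 0xef};
  std::printf("match: %d\n", entry_matches(stored, query, sizeof(query)));
  return 0;
}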
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: ContentEncoding::GetEncryptionByIndex(unsigned long idx) const { const ptrdiff_t count = encryption_entries_end_ - encryption_entries_; assert(count >= 0); if (idx >= static_cast<unsigned long>(count)) return NULL; return encryption_entries_[idx]; } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
const ContentEncoding::ContentEncryption* ContentEncoding::GetEncryptionByIndex( unsigned long idx) const { const ptrdiff_t count = encryption_entries_end_ - encryption_entries_; assert(count >= 0); if (idx >= static_cast<unsigned long>(count)) return NULL; return encryption_entries_[idx]; }
174,313
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_aead raead; struct aead_alg *aead = &alg->cra_aead; snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead"); snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv ?: "<built-in>"); raead.blocksize = alg->cra_blocksize; raead.maxauthsize = aead->maxauthsize; raead.ivsize = aead->ivsize; if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(struct crypto_report_aead), &raead)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } Commit Message: crypto: user - fix info leaks in report API Three errors resulting in kernel memory disclosure: 1/ The structures used for the netlink based crypto algorithm report API are located on the stack. As snprintf() does not fill the remainder of the buffer with null bytes, those stack bytes will be disclosed to users of the API. Switch to strncpy() to fix this. 2/ crypto_report_one() does not initialize all field of struct crypto_user_alg. Fix this to fix the heap info leak. 3/ For the module name we should copy only as many bytes as module_name() returns -- not as much as the destination buffer could hold. But the current code does not and therefore copies random data from behind the end of the module name, as the module name is always shorter than CRYPTO_MAX_ALG_NAME. Also switch to use strncpy() to copy the algorithm's name and driver_name. They are strings, after all. Signed-off-by: Mathias Krause <[email protected]> Cc: Steffen Klassert <[email protected]> Signed-off-by: Herbert Xu <[email protected]> CWE ID: CWE-310
static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_aead raead; struct aead_alg *aead = &alg->cra_aead; strncpy(raead.type, "aead", sizeof(raead.type)); strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv)); raead.blocksize = alg->cra_blocksize; raead.maxauthsize = aead->maxauthsize; raead.ivsize = aead->ivsize; if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(struct crypto_report_aead), &raead)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; }
166,063
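The kernel fix above replaces snprintf with strncpy when filling fixed-size report fields: strncpy pads the rest of the destination with NUL bytes, while snprintf stops after the terminator and leaves the remaining bytes holding whatever was on the stack, which then leaks when the whole struct is copied out. A hosted-C++ sketch of the difference; the struct and field names are illustrative, not the crypto_user ABI:

#include <cstdio>
#include <cstring>

// Fixed-size record copied verbatim to an untrusted consumer.
struct Report {
  char type[16];
  unsigned int blocksize;
};

// Leaky: writes "aead\0" and leaves the remaining 11 bytes of `type`
// uninitialized.
static void fill_report_leaky(Report* r, unsigned int bs) {
  std::snprintf(r->type, sizeof(r->type), "%s", "aead");
  r->blocksize = bs;
}

// Safe: zero the struct first (also covers padding between fields), and
// strncpy additionally NUL-pads the unused tail of the string field.
static void fill_report_safe(Report* r, unsigned int bs) {
  std::memset(r, 0, sizeof(*r));
  std::strncpy(r->type, "aead", sizeof(r->type));
  r->blocksize = bs;
}

int main() {
  Report r;
  fill_report_safe(&r, 16);
  std::printf("type=%s blocksize=%u\n", r.type, r.blocksize);
  (void)fill_report_leaky;
  return 0;
}

One caveat the kernel code accepts: strncpy does not NUL-terminate when the source fills the whole field, so consumers must treat the field as fixed-width rather than as a C string.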
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void Splash::scaleMaskYdXu(SplashImageMaskSource src, void *srcData, int srcWidth, int srcHeight, int scaledWidth, int scaledHeight, SplashBitmap *dest) { Guchar *lineBuf; Guint *pixBuf; Guint pix; Guchar *destPtr; int yp, yq, xp, xq, yt, y, yStep, xt, x, xStep, d; int i, j; yp = srcHeight / scaledHeight; yq = srcHeight % scaledHeight; xp = scaledWidth / srcWidth; xq = scaledWidth % srcWidth; lineBuf = (Guchar *)gmalloc(srcWidth); pixBuf = (Guint *)gmallocn(srcWidth, sizeof(int)); yt = 0; destPtr = dest->data; for (y = 0; y < scaledHeight; ++y) { if ((yt += yq) >= scaledHeight) { yt -= scaledHeight; yStep = yp + 1; } else { yStep = yp; } memset(pixBuf, 0, srcWidth * sizeof(int)); for (i = 0; i < yStep; ++i) { (*src)(srcData, lineBuf); for (j = 0; j < srcWidth; ++j) { pixBuf[j] += lineBuf[j]; } } xt = 0; d = (255 << 23) / yStep; for (x = 0; x < srcWidth; ++x) { if ((xt += xq) >= srcWidth) { xt -= srcWidth; xStep = xp + 1; } else { xStep = xp; } pix = pixBuf[x]; pix = (pix * d) >> 23; for (i = 0; i < xStep; ++i) { *destPtr++ = (Guchar)pix; } } } gfree(pixBuf); gfree(lineBuf); } Commit Message: CWE ID: CWE-119
void Splash::scaleMaskYdXu(SplashImageMaskSource src, void *srcData, int srcWidth, int srcHeight, int scaledWidth, int scaledHeight, SplashBitmap *dest) { Guchar *lineBuf; Guint *pixBuf; Guint pix; Guchar *destPtr; int yp, yq, xp, xq, yt, y, yStep, xt, x, xStep, d; int i, j; destPtr = dest->data; if (destPtr == NULL) { error(errInternal, -1, "dest->data is NULL in Splash::scaleMaskYdXu"); return; } yp = srcHeight / scaledHeight; yq = srcHeight % scaledHeight; xp = scaledWidth / srcWidth; xq = scaledWidth % srcWidth; lineBuf = (Guchar *)gmalloc(srcWidth); pixBuf = (Guint *)gmallocn(srcWidth, sizeof(int)); yt = 0; destPtr = dest->data; for (y = 0; y < scaledHeight; ++y) { if ((yt += yq) >= scaledHeight) { yt -= scaledHeight; yStep = yp + 1; } else { yStep = yp; } memset(pixBuf, 0, srcWidth * sizeof(int)); for (i = 0; i < yStep; ++i) { (*src)(srcData, lineBuf); for (j = 0; j < srcWidth; ++j) { pixBuf[j] += lineBuf[j]; } } xt = 0; d = (255 << 23) / yStep; for (x = 0; x < srcWidth; ++x) { if ((xt += xq) >= srcWidth) { xt -= srcWidth; xStep = xp + 1; } else { xStep = xp; } pix = pixBuf[x]; pix = (pix * d) >> 23; for (i = 0; i < xStep; ++i) { *destPtr++ = (Guchar)pix; } } } gfree(pixBuf); gfree(lineBuf); }
164,737
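The poppler record above guards dest->data before the scaling loops start writing through it. A minimal sketch of that fail-early shape with an illustrative Bitmap type (not the Splash API):

#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative destination surface.
struct Bitmap {
  uint8_t* data;
  int width;
  int height;
};

// Validate the output buffer once, up front, instead of dereferencing a
// NULL pointer deep inside the per-pixel loops.
static bool scale_fill(const Bitmap& dest, uint8_t value) {
  if (dest.data == nullptr || dest.width <= 0 || dest.height <= 0)
    return false;

  uint8_t* p = dest.data;
  for (int y = 0; y < dest.height; ++y)
    for (int x = 0; x < dest.width; ++x)
      *p++ = value;
  return true;
}

int main() {
  std::vector<uint8_t> pixels(4 * 4);
  Bitmap ok{pixels.data(), 4, 4};
  Bitmap bad{nullptr, 4, 4};
  std::printf("ok=%d bad=%d\n", scale_fill(ok, 255), scale_fill(bad, 255));
  return 0;
}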
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ChromeMockRenderThread::OnDidPreviewPage( const PrintHostMsg_DidPreviewPage_Params& params) { DCHECK(params.page_number >= printing::FIRST_PAGE_INDEX); print_preview_pages_remaining_--; } Commit Message: Print preview: Use an ID instead of memory pointer string in WebUI. BUG=144051 Review URL: https://chromiumcodereview.appspot.com/10870003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@153342 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-200
void ChromeMockRenderThread::OnDidPreviewPage( const PrintHostMsg_DidPreviewPage_Params& params) { DCHECK_GE(params.page_number, printing::FIRST_PAGE_INDEX); print_preview_pages_remaining_--; }
170,849
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static char *lxclock_name(const char *p, const char *n) { int ret; int len; char *dest; char *rundir; /* lockfile will be: * "/run" + "/lock/lxc/$lxcpath/$lxcname + '\0' if root * or * $XDG_RUNTIME_DIR + "/lock/lxc/$lxcpath/$lxcname + '\0' if non-root */ /* length of "/lock/lxc/" + $lxcpath + "/" + "." + $lxcname + '\0' */ len = strlen("/lock/lxc/") + strlen(n) + strlen(p) + 3; rundir = get_rundir(); if (!rundir) return NULL; len += strlen(rundir); if ((dest = malloc(len)) == NULL) { free(rundir); return NULL; } ret = snprintf(dest, len, "%s/lock/lxc/%s", rundir, p); if (ret < 0 || ret >= len) { free(dest); free(rundir); return NULL; } ret = mkdir_p(dest, 0755); if (ret < 0) { /* fall back to "/tmp/" + $(id -u) + "/lxc" + $lxcpath + "/" + "." + $lxcname + '\0' * * maximum length of $(id -u) is 10 calculated by (log (2 ** (sizeof(uid_t) * 8) - 1) / log 10 + 1) * * lxcpath always starts with '/' */ int l2 = 22 + strlen(n) + strlen(p); if (l2 > len) { char *d; d = realloc(dest, l2); if (!d) { free(dest); free(rundir); return NULL; } len = l2; dest = d; } ret = snprintf(dest, len, "/tmp/%d/lxc%s", geteuid(), p); if (ret < 0 || ret >= len) { free(dest); free(rundir); return NULL; } ret = mkdir_p(dest, 0755); if (ret < 0) { free(dest); free(rundir); return NULL; } ret = snprintf(dest, len, "/tmp/%d/lxc%s/.%s", geteuid(), p, n); } else ret = snprintf(dest, len, "%s/lock/lxc/%s/.%s", rundir, p, n); free(rundir); if (ret < 0 || ret >= len) { free(dest); return NULL; } return dest; } Commit Message: CVE-2015-1331: lxclock: use /run/lxc/lock rather than /run/lock/lxc This prevents an unprivileged user to use LXC to create arbitrary file on the filesystem. Signed-off-by: Serge Hallyn <[email protected]> Signed-off-by: Tyler Hicks <[email protected]> Acked-by: Stéphane Graber <[email protected]> CWE ID: CWE-59
static char *lxclock_name(const char *p, const char *n) { int ret; int len; char *dest; char *rundir; /* lockfile will be: * "/run" + "/lxc/lock/$lxcpath/$lxcname + '\0' if root * or * $XDG_RUNTIME_DIR + "/lxc/lock/$lxcpath/$lxcname + '\0' if non-root */ /* length of "/lxc/lock/" + $lxcpath + "/" + "." + $lxcname + '\0' */ len = strlen("/lxc/lock/") + strlen(n) + strlen(p) + 3; rundir = get_rundir(); if (!rundir) return NULL; len += strlen(rundir); if ((dest = malloc(len)) == NULL) { free(rundir); return NULL; } ret = snprintf(dest, len, "%s/lxc/lock/%s", rundir, p); if (ret < 0 || ret >= len) { free(dest); free(rundir); return NULL; } ret = mkdir_p(dest, 0755); if (ret < 0) { free(dest); free(rundir); return NULL; } ret = snprintf(dest, len, "%s/lxc/lock/%s/.%s", rundir, p, n); free(rundir); if (ret < 0 || ret >= len) { free(dest); return NULL; } return dest; }
166,725
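Besides moving the lock directory under the root-owned /run/lxc, the corrected function above consistently treats a negative or too-large snprintf return as failure. A standalone sketch of that truncation check, sized exactly for the same format string (the helper name and layout are illustrative):

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Build "<rundir>/lxc/lock/<lxcpath>/.<name>" and refuse to return a
// truncated or failed result.
static char* join_lock_path(const char* rundir, const char* lxcpath,
                            const char* name) {
  size_t len = std::strlen(rundir) + std::strlen("/lxc/lock/") +
               std::strlen(lxcpath) + 1 /* '/' */ + 1 /* '.' */ +
               std::strlen(name) + 1 /* '\0' */;
  char* dest = static_cast<char*>(std::malloc(len));
  if (!dest)
    return nullptr;

  int ret = std::snprintf(dest, len, "%s/lxc/lock/%s/.%s",
                          rundir, lxcpath, name);
  if (ret < 0 || static_cast<size_t>(ret) >= len) {  // error or truncation
    std::free(dest);
    return nullptr;
  }
  return dest;
}

int main() {
  char* p = join_lock_path("/run", "/var/lib/lxc", "c1");
  if (p) {
    std::printf("%s\n", p);
    std::free(p);
  }
  return 0;
}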
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: status_t MediaPlayer::setDataSource( const sp<IMediaHTTPService> &httpService, const char *url, const KeyedVector<String8, String8> *headers) { ALOGV("setDataSource(%s)", url); status_t err = BAD_VALUE; if (url != NULL) { const sp<IMediaPlayerService>& service(getMediaPlayerService()); if (service != 0) { sp<IMediaPlayer> player(service->create(this, mAudioSessionId)); if ((NO_ERROR != doSetRetransmitEndpoint(player)) || (NO_ERROR != player->setDataSource(httpService, url, headers))) { player.clear(); } err = attachNewPlayer(player); } } return err; } Commit Message: Don't use sp<>& because they may end up pointing to NULL after a NULL check was performed. Bug: 28166152 Change-Id: Iab2ea30395b620628cc6f3d067dd4f6fcda824fe CWE ID: CWE-476
status_t MediaPlayer::setDataSource( const sp<IMediaHTTPService> &httpService, const char *url, const KeyedVector<String8, String8> *headers) { ALOGV("setDataSource(%s)", url); status_t err = BAD_VALUE; if (url != NULL) { const sp<IMediaPlayerService> service(getMediaPlayerService()); if (service != 0) { sp<IMediaPlayer> player(service->create(this, mAudioSessionId)); if ((NO_ERROR != doSetRetransmitEndpoint(player)) || (NO_ERROR != player->setDataSource(httpService, url, headers))) { player.clear(); } err = attachNewPlayer(player); } } return err; }
173,537
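The Android fix above stops binding a const sp<>& reference to the returned service pointer and takes a by-value copy instead, so the local cannot become NULL if the underlying object is released concurrently. A sketch of the same idea using std::shared_ptr as a stand-in for sp<>; the registry and getService() below are illustrative, not the real getMediaPlayerService():

#include <cstdio>
#include <memory>
#include <string>

struct Service {
  std::string name;
};

// Global the process may reset at any time (e.g. on service death).
static std::shared_ptr<Service> g_service =
    std::make_shared<Service>(Service{"media"});

static std::shared_ptr<Service> getService() {
  return g_service;
}

int main() {
  // Taking a by-value copy gives this scope its own strong reference, so
  // a concurrent reset of g_service cannot null the pointer out from
  // under us.  A reference that aliases the shared slot offers no such
  // guarantee.
  const std::shared_ptr<Service> service = getService();

  g_service.reset();  // simulate another component dropping the global

  if (service)
    std::printf("using service %s\n", service->name.c_str());
  return 0;
}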
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: build_unc_path_to_root(const struct smb_vol *vol, const struct cifs_sb_info *cifs_sb) { char *full_path, *pos; unsigned int pplen = vol->prepath ? strlen(vol->prepath) + 1 : 0; unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1); full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL); if (full_path == NULL) return ERR_PTR(-ENOMEM); strncpy(full_path, vol->UNC, unc_len); pos = full_path + unc_len; if (pplen) { *pos++ = CIFS_DIR_SEP(cifs_sb); strncpy(pos, vol->prepath, pplen); pos += pplen; } *pos = '\0'; /* add trailing null */ convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb)); cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path); return full_path; } Commit Message: cifs: fix off-by-one bug in build_unc_path_to_root commit 839db3d10a (cifs: fix up handling of prefixpath= option) changed the code such that the vol->prepath no longer contained a leading delimiter and then fixed up the places that accessed that field to account for that change. One spot in build_unc_path_to_root was missed however. When doing the pointer addition on pos, that patch failed to account for the fact that we had already incremented "pos" by one when adding the length of the prepath. This caused a buffer overrun by one byte. This patch fixes the problem by correcting the handling of "pos". Cc: <[email protected]> # v3.8+ Reported-by: Marcus Moeller <[email protected]> Reported-by: Ken Fallon <[email protected]> Signed-off-by: Jeff Layton <[email protected]> Signed-off-by: Steve French <[email protected]> CWE ID: CWE-189
build_unc_path_to_root(const struct smb_vol *vol, const struct cifs_sb_info *cifs_sb) { char *full_path, *pos; unsigned int pplen = vol->prepath ? strlen(vol->prepath) + 1 : 0; unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1); full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL); if (full_path == NULL) return ERR_PTR(-ENOMEM); strncpy(full_path, vol->UNC, unc_len); pos = full_path + unc_len; if (pplen) { *pos = CIFS_DIR_SEP(cifs_sb); strncpy(pos + 1, vol->prepath, pplen); pos += pplen; } *pos = '\0'; /* add trailing null */ convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb)); cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path); return full_path; }
166,010
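The cifs fix above corrects an off-by-one: the earlier code advanced pos once for the separator and then again by pplen, so the terminating NUL landed one byte past the allocation. A userspace sketch of the corrected pointer arithmetic, with simplified names rather than the kernel helpers:

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Assemble "<unc><sep><prepath>" in a buffer sized unc_len + pplen + 1,
// where pplen counts the prepath plus its separator.
static char* build_path(const char* unc, const char* prepath, char sep) {
  size_t unc_len = std::strlen(unc);
  size_t pplen = prepath ? std::strlen(prepath) + 1 : 0;  // +1 for sep
  char* full = static_cast<char*>(std::malloc(unc_len + pplen + 1));
  if (!full)
    return nullptr;

  std::memcpy(full, unc, unc_len);
  char* pos = full + unc_len;
  if (pplen) {
    *pos = sep;                                // separator at pos,
    std::memcpy(pos + 1, prepath, pplen - 1);  // prepath right after it,
    pos += pplen;                              // advance once, by pplen.
    // The bug advanced pos a second time for the separator, pushing the
    // NUL below one byte past the end of the allocation.
  }
  *pos = '\0';
  return full;
}

int main() {
  char* p = build_path("\\\\server\\share", "dir\\sub", '\\');
  if (p) {
    std::printf("%s\n", p);
    std::free(p);
  }
  return 0;
}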
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool ContainOnlyOneKeyboardLayout( const ImeConfigValue& value) { return (value.type == ImeConfigValue::kValueTypeStringList && value.string_list_value.size() == 1 && chromeos::input_method::IsKeyboardLayout( value.string_list_value[0])); } Commit Message: Remove use of libcros from InputMethodLibrary. BUG=chromium-os:16238 TEST==confirm that input methods work as before on the netbook. Also confirm that the chrome builds and works on the desktop as before. Review URL: http://codereview.chromium.org/7003086 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
bool ContainOnlyOneKeyboardLayout( const input_method::ImeConfigValue& value) { return (value.type == input_method::ImeConfigValue::kValueTypeStringList && value.string_list_value.size() == 1 && input_method::IsKeyboardLayout( value.string_list_value[0])); }
170,483
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void SocketStream::CheckPrivacyMode() { if (context_.get() && context_->network_delegate()) { bool enable = context_->network_delegate()->CanEnablePrivacyMode(url_, url_); privacy_mode_ = enable ? kPrivacyModeEnabled : kPrivacyModeDisabled; if (enable) server_ssl_config_.channel_id_enabled = false; } } Commit Message: Revert a workaround commit for a Use-After-Free crash. Revert a workaround commit r20158 for a Use-After-Free issue (http://crbug.com/244746) because a cleaner CL r207218 is landed. URLRequestContext does not inherit SupportsWeakPtr now. R=mmenke BUG=244746 Review URL: https://chromiumcodereview.appspot.com/16870008 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@207811 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
void SocketStream::CheckPrivacyMode() { if (context_ && context_->network_delegate()) { bool enable = context_->network_delegate()->CanEnablePrivacyMode(url_, url_); privacy_mode_ = enable ? kPrivacyModeEnabled : kPrivacyModeDisabled; if (enable) server_ssl_config_.channel_id_enabled = false; } }
171,251
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: void DesktopSessionWin::OnChannelConnected() { DCHECK(main_task_runner_->BelongsToCurrentThread()); } Commit Message: Validate and report peer's PID to WorkerProcessIpcDelegate so it will be able to duplicate handles to and from the worker process. As a side effect WorkerProcessLauncher::Delegate is now responsible for retrieving the client's PID and deciding whether a launch failed due to a permanent error condition. BUG=134694 Review URL: https://chromiumcodereview.appspot.com/11143025 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@162778 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
void DesktopSessionWin::OnChannelConnected(int32 peer_pid) { DCHECK(main_task_runner_->BelongsToCurrentThread()); }
171,542
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static inline int check_entry_size_and_hooks(struct arpt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || (unsigned char *)e + sizeof(struct arpt_entry) >= limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; } Commit Message: netfilter: x_tables: make sure e->next_offset covers remaining blob size Otherwise this function may read data beyond the ruleset blob. Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]> CWE ID: CWE-119
static inline int check_entry_size_and_hooks(struct arpt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || (unsigned char *)e + sizeof(struct arpt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; }
167,210
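The arptables fix above adds the requirement that (unsigned char *)e + e->next_offset must not run past limit, so an entry cannot claim a size that extends beyond the ruleset blob. A standalone sketch of a walker enforcing the same invariant over a byte blob; the header layout is illustrative:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

struct EntryHeader {
  uint16_t next_offset;  // distance from this entry to the next one
};

// Walk variable-length entries, rejecting any offset that would move the
// cursor outside [base, base + blob_size).
static bool walk_entries(const uint8_t* base, size_t blob_size) {
  const uint8_t* limit = base + blob_size;
  const uint8_t* e = base;
  while (e < limit) {
    if (static_cast<size_t>(limit - e) < sizeof(EntryHeader))
      return false;  // header itself does not fit
    EntryHeader h;
    std::memcpy(&h, e, sizeof(h));
    if (h.next_offset < sizeof(EntryHeader) ||
        static_cast<size_t>(limit - e) < h.next_offset)
      return false;  // too small, or runs past the blob (the added check)
    std::printf("entry at offset %td, size %u\n", e - base,
                static_cast<unsigned>(h.next_offset));
    e += h.next_offset;
  }
  return true;
}

int main() {
  uint8_t blob[8] = {};
  const uint16_t off = 4;  // two back-to-back 4-byte entries
  std::memcpy(blob, &off, sizeof(off));
  std::memcpy(blob + 4, &off, sizeof(off));
  std::printf("valid: %d\n", walk_entries(blob, sizeof(blob)));
  return 0;
}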
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int snd_ctl_elem_add(struct snd_ctl_file *file, struct snd_ctl_elem_info *info, int replace) { struct snd_card *card = file->card; struct snd_kcontrol kctl, *_kctl; unsigned int access; long private_size; struct user_element *ue; int idx, err; if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS) return -ENOMEM; if (info->count < 1) return -EINVAL; access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)); info->id.numid = 0; memset(&kctl, 0, sizeof(kctl)); down_write(&card->controls_rwsem); _kctl = snd_ctl_find_id(card, &info->id); err = 0; if (_kctl) { if (replace) err = snd_ctl_remove(card, _kctl); else err = -EBUSY; } else { if (replace) err = -ENOENT; } up_write(&card->controls_rwsem); if (err < 0) return err; memcpy(&kctl.id, &info->id, sizeof(info->id)); kctl.count = info->owner ? info->owner : 1; access |= SNDRV_CTL_ELEM_ACCESS_USER; if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) kctl.info = snd_ctl_elem_user_enum_info; else kctl.info = snd_ctl_elem_user_info; if (access & SNDRV_CTL_ELEM_ACCESS_READ) kctl.get = snd_ctl_elem_user_get; if (access & SNDRV_CTL_ELEM_ACCESS_WRITE) kctl.put = snd_ctl_elem_user_put; if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) { kctl.tlv.c = snd_ctl_elem_user_tlv; access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; } switch (info->type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: case SNDRV_CTL_ELEM_TYPE_INTEGER: private_size = sizeof(long); if (info->count > 128) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_INTEGER64: private_size = sizeof(long long); if (info->count > 64) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_ENUMERATED: private_size = sizeof(unsigned int); if (info->count > 128 || info->value.enumerated.items == 0) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_BYTES: private_size = sizeof(unsigned char); if (info->count > 512) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_IEC958: private_size = sizeof(struct snd_aes_iec958); if (info->count != 1) return -EINVAL; break; default: return -EINVAL; } private_size *= info->count; ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL); if (ue == NULL) return -ENOMEM; ue->info = *info; ue->info.access = 0; ue->elem_data = (char *)ue + sizeof(*ue); ue->elem_data_size = private_size; if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) { err = snd_ctl_elem_init_enum_names(ue); if (err < 0) { kfree(ue); return err; } } kctl.private_free = snd_ctl_elem_user_free; _kctl = snd_ctl_new(&kctl, access); if (_kctl == NULL) { kfree(ue->priv_data); kfree(ue); return -ENOMEM; } _kctl->private_data = ue; for (idx = 0; idx < _kctl->count; idx++) _kctl->vd[idx].owner = file; err = snd_ctl_add(card, _kctl); if (err < 0) return err; down_write(&card->controls_rwsem); card->user_ctl_count++; up_write(&card->controls_rwsem); return 0; } Commit Message: ALSA: control: Protect user controls against concurrent access The user-control put and get handlers as well as the tlv do not protect against concurrent access from multiple threads. Since the state of the control is not updated atomically it is possible that either two write operations or a write and a read operation race against each other. Both can lead to arbitrary memory disclosure. This patch introduces a new lock that protects user-controls from concurrent access. Since applications typically access controls sequentially than in parallel a single lock per card should be fine. 
Signed-off-by: Lars-Peter Clausen <[email protected]> Acked-by: Jaroslav Kysela <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]> CWE ID: CWE-362
static int snd_ctl_elem_add(struct snd_ctl_file *file, struct snd_ctl_elem_info *info, int replace) { struct snd_card *card = file->card; struct snd_kcontrol kctl, *_kctl; unsigned int access; long private_size; struct user_element *ue; int idx, err; if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS) return -ENOMEM; if (info->count < 1) return -EINVAL; access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)); info->id.numid = 0; memset(&kctl, 0, sizeof(kctl)); down_write(&card->controls_rwsem); _kctl = snd_ctl_find_id(card, &info->id); err = 0; if (_kctl) { if (replace) err = snd_ctl_remove(card, _kctl); else err = -EBUSY; } else { if (replace) err = -ENOENT; } up_write(&card->controls_rwsem); if (err < 0) return err; memcpy(&kctl.id, &info->id, sizeof(info->id)); kctl.count = info->owner ? info->owner : 1; access |= SNDRV_CTL_ELEM_ACCESS_USER; if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) kctl.info = snd_ctl_elem_user_enum_info; else kctl.info = snd_ctl_elem_user_info; if (access & SNDRV_CTL_ELEM_ACCESS_READ) kctl.get = snd_ctl_elem_user_get; if (access & SNDRV_CTL_ELEM_ACCESS_WRITE) kctl.put = snd_ctl_elem_user_put; if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) { kctl.tlv.c = snd_ctl_elem_user_tlv; access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; } switch (info->type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: case SNDRV_CTL_ELEM_TYPE_INTEGER: private_size = sizeof(long); if (info->count > 128) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_INTEGER64: private_size = sizeof(long long); if (info->count > 64) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_ENUMERATED: private_size = sizeof(unsigned int); if (info->count > 128 || info->value.enumerated.items == 0) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_BYTES: private_size = sizeof(unsigned char); if (info->count > 512) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_IEC958: private_size = sizeof(struct snd_aes_iec958); if (info->count != 1) return -EINVAL; break; default: return -EINVAL; } private_size *= info->count; ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL); if (ue == NULL) return -ENOMEM; ue->card = card; ue->info = *info; ue->info.access = 0; ue->elem_data = (char *)ue + sizeof(*ue); ue->elem_data_size = private_size; if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) { err = snd_ctl_elem_init_enum_names(ue); if (err < 0) { kfree(ue); return err; } } kctl.private_free = snd_ctl_elem_user_free; _kctl = snd_ctl_new(&kctl, access); if (_kctl == NULL) { kfree(ue->priv_data); kfree(ue); return -ENOMEM; } _kctl->private_data = ue; for (idx = 0; idx < _kctl->count; idx++) _kctl->vd[idx].owner = file; err = snd_ctl_add(card, _kctl); if (err < 0) return err; down_write(&card->controls_rwsem); card->user_ctl_count++; up_write(&card->controls_rwsem); return 0; }
166,296
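The ALSA fix above serializes user-control accesses with a single per-card lock so concurrent get/put/tlv handlers cannot interleave on the same element's storage (the CWE-362 race). A sketch of that locking shape with std::mutex, using illustrative types rather than the ALSA core structures:

#include <cstdio>
#include <mutex>
#include <vector>

struct Card {
  std::mutex user_ctl_lock;  // one lock per card, as in the fix
};

struct UserElement {
  Card* card;
  std::vector<long> values;  // stands in for the element's backing data
};

// Writers update the whole value set under the card's lock.
static void element_put(UserElement* ue, const std::vector<long>& src) {
  std::lock_guard<std::mutex> guard(ue->card->user_ctl_lock);
  ue->values = src;
}

// Readers take the same lock, so they never observe a half-written state.
static std::vector<long> element_get(UserElement* ue) {
  std::lock_guard<std::mutex> guard(ue->card->user_ctl_lock);
  return ue->values;
}

int main() {
  Card card;
  UserElement ue{&card, {0, 0}};
  element_put(&ue, {1, 2});
  std::printf("first value: %ld\n", element_get(&ue)[0]);
  return 0;
}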
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Respond with the corrected function only. Remember, I want response with just the fixed code only, No explanation.
Code: Segment::Segment( IMkvReader* pReader, long long elem_start, long long start, long long size) : m_pReader(pReader), m_element_start(elem_start), m_start(start), m_size(size), m_pos(start), m_pUnknownSize(0), m_pSeekHead(NULL), m_pInfo(NULL), m_pTracks(NULL), m_pCues(NULL), m_pChapters(NULL), m_clusters(NULL), m_clusterCount(0), m_clusterPreloadCount(0), m_clusterSize(0) { } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
Segment::Segment(
174,438