Dataset columns:
  instruction        string (1 unique value)
  input              string, length 90 to 139k characters
  output             string, length 16 to 138k characters
  __index_level_0__  int64, values 165k to 175k
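The rows below follow this instruction/input/output layout, with the original row index carried in __index_level_0__. As a minimal sketch of how the columns might be consumed (assuming the rows are exposed as a Hugging Face dataset; the dataset path used here is a placeholder, not the real identifier):

```python
# Minimal sketch: iterate the instruction/input/output columns described above.
# "user/vuln-fix-dataset" is a hypothetical placeholder path, not this dataset's
# actual identifier.
from datasets import load_dataset

ds = load_dataset("user/vuln-fix-dataset", split="train")

for row in ds:
    prompt = f"{row['instruction']}\n{row['input']}"  # instruction + vulnerable code, commit message, CWE ID
    target = row["output"]                            # fixed function only
    index = row["__index_level_0__"]                  # original row index (165k-175k range)
    # ... feed (prompt, target) into a fine-tuning or evaluation pipeline
```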
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) { struct xdr_buf *sndbuf = &rqst->rq_snd_buf; struct svc_rdma_op_ctxt *ctxt; struct svc_rdma_req_map *vec; struct ib_send_wr send_wr; int ret; vec = svc_rdma_get_req_map(rdma); ret = svc_rdma_map_xdr(rdma, sndbuf, vec, false); if (ret) goto out_err; ret = svc_rdma_repost_recv(rdma, GFP_NOIO); if (ret) goto out_err; ctxt = svc_rdma_get_context(rdma); ctxt->pages[0] = virt_to_page(rqst->rq_buffer); ctxt->count = 1; ctxt->direction = DMA_TO_DEVICE; ctxt->sge[0].lkey = rdma->sc_pd->local_dma_lkey; ctxt->sge[0].length = sndbuf->len; ctxt->sge[0].addr = ib_dma_map_page(rdma->sc_cm_id->device, ctxt->pages[0], 0, sndbuf->len, DMA_TO_DEVICE); if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) { ret = -EIO; goto out_unmap; } svc_rdma_count_mappings(rdma, ctxt); memset(&send_wr, 0, sizeof(send_wr)); ctxt->cqe.done = svc_rdma_wc_send; send_wr.wr_cqe = &ctxt->cqe; send_wr.sg_list = ctxt->sge; send_wr.num_sge = 1; send_wr.opcode = IB_WR_SEND; send_wr.send_flags = IB_SEND_SIGNALED; ret = svc_rdma_send(rdma, &send_wr); if (ret) { ret = -EIO; goto out_unmap; } out_err: svc_rdma_put_req_map(rdma, vec); dprintk("svcrdma: %s returns %d\n", __func__, ret); return ret; out_unmap: svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 1); goto out_err; } Commit Message: Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux Pull nfsd updates from Bruce Fields: "Another RDMA update from Chuck Lever, and a bunch of miscellaneous bugfixes" * tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits) nfsd: Fix up the "supattr_exclcreat" attributes nfsd: encoders mustn't use unitialized values in error cases nfsd: fix undefined behavior in nfsd4_layout_verify lockd: fix lockd shutdown race NFSv4: Fix callback server shutdown SUNRPC: Refactor svc_set_num_threads() NFSv4.x/callback: Create the callback service through svc_create_pooled lockd: remove redundant check on block svcrdma: Clean out old XDR encoders svcrdma: Remove the req_map cache svcrdma: Remove unused RDMA Write completion handler svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt svcrdma: Clean up RPC-over-RDMA backchannel reply processing svcrdma: Report Write/Reply chunk overruns svcrdma: Clean up RDMA_ERROR path svcrdma: Use rdma_rw API in RPC reply path svcrdma: Introduce local rdma_rw API helpers svcrdma: Clean up svc_rdma_get_inv_rkey() svcrdma: Add helper to save pages under I/O svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT ... CWE ID: CWE-404
static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) { struct svc_rdma_op_ctxt *ctxt; int ret; ctxt = svc_rdma_get_context(rdma); /* rpcrdma_bc_send_request builds the transport header and * the backchannel RPC message in the same buffer. Thus only * one SGE is needed to send both. */ ret = svc_rdma_map_reply_hdr(rdma, ctxt, rqst->rq_buffer, rqst->rq_snd_buf.len); if (ret < 0) goto out_err; ret = svc_rdma_repost_recv(rdma, GFP_NOIO); if (ret) goto out_err; ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0); if (ret) goto out_unmap; out_err: dprintk("svcrdma: %s returns %d\n", __func__, ret); return ret; out_unmap: svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 1); ret = -EIO; goto out_err; }
168,157
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: void cJSON_ReplaceItemInObject( cJSON *object, const char *string, cJSON *newitem ) { int i = 0; cJSON *c = object->child; while ( c && cJSON_strcasecmp( c->string, string ) ) { ++i; c = c->next; } if ( c ) { newitem->string = cJSON_strdup( string ); cJSON_ReplaceItemInArray( object, i, newitem ); } } Commit Message: Fix a buffer overflow / heap corruption issue that could occur if a malformed JSON string was passed on the control channel. This issue, present in the cJSON library, was already fixed upstream, so was addressed here in iperf3 by importing a newer version of cJSON (plus local ESnet modifications). Discovered and reported by Dave McDaniel, Cisco Talos. Based on a patch by @dopheide-esnet, with input from @DaveGamble. Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001, CVE-2016-4303 (cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40) Signed-off-by: Bruce A. Mah <[email protected]> CWE ID: CWE-119
void cJSON_ReplaceItemInObject( cJSON *object, const char *string, cJSON *newitem )
167,296
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: cf2_initGlobalRegionBuffer( CFF_Decoder* decoder, CF2_UInt idx, CF2_Buffer buf ) { FT_ASSERT( decoder && decoder->globals ); FT_ZERO( buf ); idx += decoder->globals_bias; if ( idx >= decoder->num_globals ) return TRUE; /* error */ buf->start = buf->ptr = decoder->globals[idx]; buf->end = decoder->globals[idx + 1]; } Commit Message: CWE ID: CWE-20
cf2_initGlobalRegionBuffer( CFF_Decoder* decoder, CF2_UInt idx, CF2_Buffer buf ) { FT_ASSERT( decoder ); FT_ZERO( buf ); idx += decoder->globals_bias; if ( idx >= decoder->num_globals ) return TRUE; /* error */ FT_ASSERT( decoder->globals ); buf->start = buf->ptr = decoder->globals[idx]; buf->end = decoder->globals[idx + 1]; }
165,221
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) { struct sctp_chunk *chunk; sctp_chunkhdr_t *ch = NULL; /* The assumption is that we are safe to process the chunks * at this time. */ if ((chunk = queue->in_progress)) { /* There is a packet that we have been working on. * Any post processing work to do before we move on? */ if (chunk->singleton || chunk->end_of_packet || chunk->pdiscard) { sctp_chunk_free(chunk); chunk = queue->in_progress = NULL; } else { /* Nothing to do. Next chunk in the packet, please. */ ch = (sctp_chunkhdr_t *) chunk->chunk_end; /* Force chunk->skb->data to chunk->chunk_end. */ skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data); /* Verify that we have at least chunk headers * worth of buffer left. */ if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) { sctp_chunk_free(chunk); chunk = queue->in_progress = NULL; } } } /* Do we need to take the next packet out of the queue to process? */ if (!chunk) { struct list_head *entry; /* Is the queue empty? */ if (list_empty(&queue->in_chunk_list)) return NULL; entry = queue->in_chunk_list.next; chunk = queue->in_progress = list_entry(entry, struct sctp_chunk, list); list_del_init(entry); /* This is the first chunk in the packet. */ chunk->singleton = 1; ch = (sctp_chunkhdr_t *) chunk->skb->data; chunk->data_accepted = 0; } chunk->chunk_hdr = ch; chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); /* In the unlikely case of an IP reassembly, the skb could be * non-linear. If so, update chunk_end so that it doesn't go past * the skb->tail. */ if (unlikely(skb_is_nonlinear(chunk->skb))) { if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) chunk->chunk_end = skb_tail_pointer(chunk->skb); } skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); chunk->subh.v = NULL; /* Subheader is no longer valid. */ if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) { /* This is not a singleton */ chunk->singleton = 0; } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { /* RFC 2960, Section 6.10 Bundling * * Partial chunks MUST NOT be placed in an SCTP packet. * If the receiver detects a partial chunk, it MUST drop * the chunk. * * Since the end of the chunk is past the end of our buffer * (which contains the whole packet, we can freely discard * the whole packet. */ sctp_chunk_free(chunk); chunk = queue->in_progress = NULL; return NULL; } else { /* We are at the end of the packet, so mark the chunk * in case we need to send a SACK. */ chunk->end_of_packet = 1; } pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n", chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), ntohs(chunk->chunk_hdr->length), chunk->skb->len); return chunk; } Commit Message: net: sctp: fix remote memory pressure from excessive queueing This scenario is not limited to ASCONF, just taken as one example triggering the issue. When receiving ASCONF probes in the form of ... -------------- INIT[ASCONF; ASCONF_ACK] -------------> <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------ -------------------- COOKIE-ECHO --------------------> <-------------------- COOKIE-ACK --------------------- ---- ASCONF_a; [ASCONF_b; ...; ASCONF_n;] JUNK ------> [...] ---- ASCONF_m; [ASCONF_o; ...; ASCONF_z;] JUNK ------> ... where ASCONF_a, ASCONF_b, ..., ASCONF_z are good-formed ASCONFs and have increasing serial numbers, we process such ASCONF chunk(s) marked with !end_of_packet and !singleton, since we have not yet reached the SCTP packet end. 
SCTP does only do verification on a chunk by chunk basis, as an SCTP packet is nothing more than just a container of a stream of chunks which it eats up one by one. We could run into the case that we receive a packet with a malformed tail, above marked as trailing JUNK. All previous chunks are here goodformed, so the stack will eat up all previous chunks up to this point. In case JUNK does not fit into a chunk header and there are no more other chunks in the input queue, or in case JUNK contains a garbage chunk header, but the encoded chunk length would exceed the skb tail, or we came here from an entirely different scenario and the chunk has pdiscard=1 mark (without having had a flush point), it will happen, that we will excessively queue up the association's output queue (a correct final chunk may then turn it into a response flood when flushing the queue ;)): I ran a simple script with incremental ASCONF serial numbers and could see the server side consuming excessive amount of RAM [before/after: up to 2GB and more]. The issue at heart is that the chunk train basically ends with !end_of_packet and !singleton markers and since commit 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet") therefore preventing an output queue flush point in sctp_do_sm() -> sctp_cmd_interpreter() on the input chunk (chunk = event_arg) even though local_cork is set, but its precedence has changed since then. In the normal case, the last chunk with end_of_packet=1 would trigger the queue flush to accommodate possible outgoing bundling. In the input queue, sctp_inq_pop() seems to do the right thing in terms of discarding invalid chunks. So, above JUNK will not enter the state machine and instead be released and exit the sctp_assoc_bh_rcv() chunk processing loop. It's simply the flush point being missing at loop exit. Adding a try-flush approach on the output queue might not work as the underlying infrastructure might be long gone at this point due to the side-effect interpreter run. One possibility, albeit a bit of a kludge, would be to defer invalid chunk freeing into the state machine in order to possibly trigger packet discards and thus indirectly a queue flush on error. It would surely be better to discard chunks as in the current, perhaps better controlled environment, but going back and forth, it's simply architecturally not possible. I tried various trailing JUNK attack cases and it seems to look good now. Joint work with Vlad Yasevich. Fixes: 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet") Signed-off-by: Daniel Borkmann <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-399
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) { struct sctp_chunk *chunk; sctp_chunkhdr_t *ch = NULL; /* The assumption is that we are safe to process the chunks * at this time. */ if ((chunk = queue->in_progress)) { /* There is a packet that we have been working on. * Any post processing work to do before we move on? */ if (chunk->singleton || chunk->end_of_packet || chunk->pdiscard) { sctp_chunk_free(chunk); chunk = queue->in_progress = NULL; } else { /* Nothing to do. Next chunk in the packet, please. */ ch = (sctp_chunkhdr_t *) chunk->chunk_end; /* Force chunk->skb->data to chunk->chunk_end. */ skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data); /* We are guaranteed to pull a SCTP header. */ } } /* Do we need to take the next packet out of the queue to process? */ if (!chunk) { struct list_head *entry; /* Is the queue empty? */ if (list_empty(&queue->in_chunk_list)) return NULL; entry = queue->in_chunk_list.next; chunk = queue->in_progress = list_entry(entry, struct sctp_chunk, list); list_del_init(entry); /* This is the first chunk in the packet. */ chunk->singleton = 1; ch = (sctp_chunkhdr_t *) chunk->skb->data; chunk->data_accepted = 0; } chunk->chunk_hdr = ch; chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); /* In the unlikely case of an IP reassembly, the skb could be * non-linear. If so, update chunk_end so that it doesn't go past * the skb->tail. */ if (unlikely(skb_is_nonlinear(chunk->skb))) { if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) chunk->chunk_end = skb_tail_pointer(chunk->skb); } skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); chunk->subh.v = NULL; /* Subheader is no longer valid. */ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) < skb_tail_pointer(chunk->skb)) { /* This is not a singleton */ chunk->singleton = 0; } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { /* Discard inside state machine. */ chunk->pdiscard = 1; chunk->chunk_end = skb_tail_pointer(chunk->skb); } else { /* We are at the end of the packet, so mark the chunk * in case we need to send a SACK. */ chunk->end_of_packet = 1; } pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n", chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), ntohs(chunk->chunk_hdr->length), chunk->skb->len); return chunk; }
166,330
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static void voutf(struct GlobalConfig *config, const char *prefix, const char *fmt, va_list ap) { size_t width = (79 - strlen(prefix)); if(!config->mute) { size_t len; char *ptr; char *print_buffer; print_buffer = curlx_mvaprintf(fmt, ap); if(!print_buffer) return; len = strlen(print_buffer); ptr = print_buffer; while(len > 0) { fputs(prefix, config->errors); if(len > width) { size_t cut = width-1; while(!ISSPACE(ptr[cut]) && cut) { cut--; } if(0 == cut) /* not a single cutting position was found, just cut it at the max text width then! */ cut = width-1; (void)fwrite(ptr, cut + 1, 1, config->errors); fputs("\n", config->errors); ptr += cut + 1; /* skip the space too */ len -= cut; } else { fputs(ptr, config->errors); len = 0; } } curl_free(print_buffer); } } Commit Message: voutf: fix bad arethmetic when outputting warnings to stderr CVE-2018-16842 Reported-by: Brian Carpenter Bug: https://curl.haxx.se/docs/CVE-2018-16842.html CWE ID: CWE-125
static void voutf(struct GlobalConfig *config, const char *prefix, const char *fmt, va_list ap) { size_t width = (79 - strlen(prefix)); if(!config->mute) { size_t len; char *ptr; char *print_buffer; print_buffer = curlx_mvaprintf(fmt, ap); if(!print_buffer) return; len = strlen(print_buffer); ptr = print_buffer; while(len > 0) { fputs(prefix, config->errors); if(len > width) { size_t cut = width-1; while(!ISSPACE(ptr[cut]) && cut) { cut--; } if(0 == cut) /* not a single cutting position was found, just cut it at the max text width then! */ cut = width-1; (void)fwrite(ptr, cut + 1, 1, config->errors); fputs("\n", config->errors); ptr += cut + 1; /* skip the space too */ len -= cut + 1; } else { fputs(ptr, config->errors); len = 0; } } curl_free(print_buffer); } }
169,029
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: bool ExtensionTtsPlatformImplWin::Speak( const std::string& src_utterance, const std::string& language, const std::string& gender, double rate, double pitch, double volume) { std::wstring utterance = UTF8ToUTF16(src_utterance); if (!speech_synthesizer_) return false; if (rate >= 0.0) { speech_synthesizer_->SetRate(static_cast<int32>(rate * 20 - 10)); } if (pitch >= 0.0) { std::wstring pitch_value = base::IntToString16(static_cast<int>(pitch * 20 - 10)); utterance = L"<pitch absmiddle=\"" + pitch_value + L"\">" + utterance + L"</pitch>"; } if (volume >= 0.0) { speech_synthesizer_->SetVolume(static_cast<uint16>(volume * 100)); } if (paused_) { speech_synthesizer_->Resume(); paused_ = false; } speech_synthesizer_->Speak( utterance.c_str(), SPF_ASYNC | SPF_PURGEBEFORESPEAK, NULL); return true; } Commit Message: Extend TTS extension API to support richer events returned from the engine to the client. Previously we just had a completed event; this adds start, word boundary, sentence boundary, and marker boundary. In addition, interrupted and canceled, which were previously errors, now become events. Mac and Windows implementations extended to support as many of these events as possible. BUG=67713 BUG=70198 BUG=75106 BUG=83404 TEST=Updates all TTS API tests to be event-based, and adds new tests. Review URL: http://codereview.chromium.org/6792014 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@91665 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-20
bool ExtensionTtsPlatformImplWin::Speak( int utterance_id, const std::string& src_utterance, const std::string& lang, const UtteranceContinuousParameters& params) { std::wstring prefix; std::wstring suffix; if (!speech_synthesizer_) return false; // TODO(dmazzoni): support languages other than the default: crbug.com/88059 if (params.rate >= 0.0) { // Map our multiplicative range of 0.1x to 10.0x onto Microsoft's // linear range of -10 to 10: // 0.1 -> -10 // 1.0 -> 0 // 10.0 -> 10 speech_synthesizer_->SetRate(static_cast<int32>(10 * log10(params.rate))); } if (params.pitch >= 0.0) { std::wstring pitch_value = base::IntToString16(static_cast<int>(params.pitch * 10 - 10)); prefix = L"<pitch absmiddle=\"" + pitch_value + L"\">"; suffix = L"</pitch>"; } if (params.volume >= 0.0) { speech_synthesizer_->SetVolume(static_cast<uint16>(params.volume * 100)); } if (paused_) { speech_synthesizer_->Resume(); paused_ = false; } // TODO(dmazzoni): convert SSML to SAPI xml. http://crbug.com/88072 utterance_ = UTF8ToWide(src_utterance); utterance_id_ = utterance_id; char_position_ = 0; std::wstring merged_utterance = prefix + utterance_ + suffix; prefix_len_ = prefix.size(); HRESULT result = speech_synthesizer_->Speak( merged_utterance.c_str(), SPF_ASYNC | SPF_PURGEBEFORESPEAK, &stream_number_); return (result == S_OK); }
170,402
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: void utf32_to_utf8(const char32_t* src, size_t src_len, char* dst) { if (src == NULL || src_len == 0 || dst == NULL) { return; } const char32_t *cur_utf32 = src; const char32_t *end_utf32 = src + src_len; char *cur = dst; while (cur_utf32 < end_utf32) { size_t len = utf32_codepoint_utf8_length(*cur_utf32); utf32_codepoint_to_utf8((uint8_t *)cur, *cur_utf32++, len); cur += len; } *cur = '\0'; } Commit Message: libutils/Unicode.cpp: Correct length computation and add checks for utf16->utf8 Inconsistent behaviour between utf16_to_utf8 and utf16_to_utf8_length is causing a heap overflow. Correcting the length computation and adding bound checks to the conversion functions. Test: ran libutils_tests Bug: 29250543 Change-Id: I6115e3357141ed245c63c6eb25fc0fd0a9a7a2bb (cherry picked from commit c4966a363e46d2e1074d1a365e232af0dcedd6a1) CWE ID: CWE-119
void utf32_to_utf8(const char32_t* src, size_t src_len, char* dst, size_t dst_len) { if (src == NULL || src_len == 0 || dst == NULL) { return; } const char32_t *cur_utf32 = src; const char32_t *end_utf32 = src + src_len; char *cur = dst; while (cur_utf32 < end_utf32) { size_t len = utf32_codepoint_utf8_length(*cur_utf32); LOG_ALWAYS_FATAL_IF(dst_len < len, "%zu < %zu", dst_len, len); utf32_codepoint_to_utf8((uint8_t *)cur, *cur_utf32++, len); cur += len; dst_len -= len; } LOG_ALWAYS_FATAL_IF(dst_len < 1, "dst_len < 1: %zu < 1", dst_len); *cur = '\0'; }
173,421
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static int ext4_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { handle_t *handle = NULL; int ret = 0; unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; int dio_credits; ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", inode->i_ino, create); /* * ext4_get_block in prepare for a DIO write or buffer write. * We allocate an uinitialized extent if blocks haven't been allocated. * The extent will be converted to initialized after IO complete. */ create = EXT4_GET_BLOCKS_IO_CREATE_EXT; if (max_blocks > DIO_MAX_BLOCKS) max_blocks = DIO_MAX_BLOCKS; dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); handle = ext4_journal_start(inode, dio_credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, create); if (ret > 0) { bh_result->b_size = (ret << inode->i_blkbits); ret = 0; } ext4_journal_stop(handle); out: return ret; } Commit Message: ext4: use ext4_get_block_write in buffer write Allocate uninitialized extent before ext4 buffer write and convert the extent to initialized after io completes. The purpose is to make sure an extent can only be marked initialized after it has been written with new data so we can safely drop the i_mutex lock in ext4 DIO read without exposing stale data. This helps to improve multi-thread DIO read performance on high-speed disks. Skip the nobh and data=journal mount cases to make things simple for now. Signed-off-by: Jiaying Zhang <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]> CWE ID:
static int ext4_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { handle_t *handle = ext4_journal_current_handle(); int ret = 0; unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; int dio_credits; int started = 0; ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", inode->i_ino, create); /* * ext4_get_block in prepare for a DIO write or buffer write. * We allocate an uinitialized extent if blocks haven't been allocated. * The extent will be converted to initialized after IO complete. */ create = EXT4_GET_BLOCKS_IO_CREATE_EXT; if (!handle) { if (max_blocks > DIO_MAX_BLOCKS) max_blocks = DIO_MAX_BLOCKS; dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); handle = ext4_journal_start(inode, dio_credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } started = 1; } ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, create); if (ret > 0) { bh_result->b_size = (ret << inode->i_blkbits); ret = 0; } if (started) ext4_journal_stop(handle); out: return ret; }
167,545
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static Image *ReadSGIImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType status; MagickSizeType number_pixels; MemoryInfo *pixel_info; register Quantum *q; register ssize_t i, x; register unsigned char *p; SGIInfo iris_info; size_t bytes_per_pixel, quantum; ssize_t count, y, z; unsigned char *pixels; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read SGI raster header. */ iris_info.magic=ReadBlobMSBShort(image); do { /* Verify SGI identifier. */ if (iris_info.magic != 0x01DA) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); iris_info.storage=(unsigned char) ReadBlobByte(image); switch (iris_info.storage) { case 0x00: image->compression=NoCompression; break; case 0x01: image->compression=RLECompression; break; default: ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } iris_info.bytes_per_pixel=(unsigned char) ReadBlobByte(image); if ((iris_info.bytes_per_pixel == 0) || (iris_info.bytes_per_pixel > 2)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); iris_info.dimension=ReadBlobMSBShort(image); iris_info.columns=ReadBlobMSBShort(image); iris_info.rows=ReadBlobMSBShort(image); iris_info.depth=ReadBlobMSBShort(image); if ((iris_info.depth == 0) || (iris_info.depth > 4)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); iris_info.minimum_value=ReadBlobMSBLong(image); iris_info.maximum_value=ReadBlobMSBLong(image); iris_info.sans=ReadBlobMSBLong(image); (void) ReadBlob(image,sizeof(iris_info.name),(unsigned char *) iris_info.name); iris_info.name[sizeof(iris_info.name)-1]='\0'; if (*iris_info.name != '\0') (void) SetImageProperty(image,"label",iris_info.name,exception); iris_info.pixel_format=ReadBlobMSBLong(image); if (iris_info.pixel_format != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); count=ReadBlob(image,sizeof(iris_info.filler),iris_info.filler); (void) count; image->columns=iris_info.columns; image->rows=iris_info.rows; image->depth=(size_t) MagickMin(iris_info.depth,MAGICKCORE_QUANTUM_DEPTH); if (iris_info.pixel_format == 0) image->depth=(size_t) MagickMin((size_t) 8* iris_info.bytes_per_pixel,MAGICKCORE_QUANTUM_DEPTH); if (iris_info.depth < 3) { image->storage_class=PseudoClass; image->colors=iris_info.bytes_per_pixel > 1 ? 65535 : 256; } if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Allocate SGI pixels. 
*/ bytes_per_pixel=(size_t) iris_info.bytes_per_pixel; number_pixels=(MagickSizeType) iris_info.columns*iris_info.rows; if ((4*bytes_per_pixel*number_pixels) != ((MagickSizeType) (size_t) (4*bytes_per_pixel*number_pixels))) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixel_info=AcquireVirtualMemory(iris_info.columns,iris_info.rows*4* bytes_per_pixel*sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); if ((int) iris_info.storage != 0x01) { unsigned char *scanline; /* Read standard image format. */ scanline=(unsigned char *) AcquireQuantumMemory(iris_info.columns, bytes_per_pixel*sizeof(*scanline)); if (scanline == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (z=0; z < (ssize_t) iris_info.depth; z++) { p=pixels+bytes_per_pixel*z; for (y=0; y < (ssize_t) iris_info.rows; y++) { count=ReadBlob(image,bytes_per_pixel*iris_info.columns,scanline); if (EOFBlob(image) != MagickFalse) break; if (bytes_per_pixel == 2) for (x=0; x < (ssize_t) iris_info.columns; x++) { *p=scanline[2*x]; *(p+1)=scanline[2*x+1]; p+=8; } else for (x=0; x < (ssize_t) iris_info.columns; x++) { *p=scanline[x]; p+=4; } } } scanline=(unsigned char *) RelinquishMagickMemory(scanline); } else { MemoryInfo *packet_info; size_t *runlength; ssize_t offset, *offsets; unsigned char *packets; unsigned int data_order; /* Read runlength-encoded image format. */ offsets=(ssize_t *) AcquireQuantumMemory((size_t) iris_info.rows, iris_info.depth*sizeof(*offsets)); runlength=(size_t *) AcquireQuantumMemory(iris_info.rows, iris_info.depth*sizeof(*runlength)); packet_info=AcquireVirtualMemory((size_t) iris_info.columns+10UL,4UL* sizeof(*packets)); if ((offsets == (ssize_t *) NULL) || (runlength == (size_t *) NULL) || (packet_info == (MemoryInfo *) NULL)) { if (offsets == (ssize_t *) NULL) offsets=(ssize_t *) RelinquishMagickMemory(offsets); if (runlength == (size_t *) NULL) runlength=(size_t *) RelinquishMagickMemory(runlength); if (packet_info == (MemoryInfo *) NULL) packet_info=RelinquishVirtualMemory(packet_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } packets=(unsigned char *) GetVirtualMemoryBlob(packet_info); for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++) offsets[i]=ReadBlobMSBSignedLong(image); for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++) { runlength[i]=ReadBlobMSBLong(image); if (runlength[i] > (4*(size_t) iris_info.columns+10)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } /* Check data order. 
*/ offset=0; data_order=0; for (y=0; ((y < (ssize_t) iris_info.rows) && (data_order == 0)); y++) for (z=0; ((z < (ssize_t) iris_info.depth) && (data_order == 0)); z++) { if (offsets[y+z*iris_info.rows] < offset) data_order=1; offset=offsets[y+z*iris_info.rows]; } offset=(ssize_t) TellBlob(image); if (data_order == 1) { for (z=0; z < (ssize_t) iris_info.depth; z++) { p=pixels; for (y=0; y < (ssize_t) iris_info.rows; y++) { if (offset != offsets[y+z*iris_info.rows]) { offset=offsets[y+z*iris_info.rows]; offset=(ssize_t) SeekBlob(image,(ssize_t) offset,SEEK_SET); } count=ReadBlob(image,(size_t) runlength[y+z*iris_info.rows], packets); if (EOFBlob(image) != MagickFalse) break; offset+=(ssize_t) runlength[y+z*iris_info.rows]; status=SGIDecode(bytes_per_pixel,(ssize_t) (runlength[y+z*iris_info.rows]/bytes_per_pixel),packets, 1L*iris_info.columns,p+bytes_per_pixel*z); if (status == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); p+=(iris_info.columns*4*bytes_per_pixel); } } } else { MagickOffsetType position; position=TellBlob(image); p=pixels; for (y=0; y < (ssize_t) iris_info.rows; y++) { for (z=0; z < (ssize_t) iris_info.depth; z++) { if (offset != offsets[y+z*iris_info.rows]) { offset=offsets[y+z*iris_info.rows]; offset=(ssize_t) SeekBlob(image,(ssize_t) offset,SEEK_SET); } count=ReadBlob(image,(size_t) runlength[y+z*iris_info.rows], packets); if (EOFBlob(image) != MagickFalse) break; offset+=(ssize_t) runlength[y+z*iris_info.rows]; status=SGIDecode(bytes_per_pixel,(ssize_t) (runlength[y+z*iris_info.rows]/bytes_per_pixel),packets, 1L*iris_info.columns,p+bytes_per_pixel*z); if (status == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } p+=(iris_info.columns*4*bytes_per_pixel); } offset=(ssize_t) SeekBlob(image,position,SEEK_SET); } packet_info=RelinquishVirtualMemory(packet_info); runlength=(size_t *) RelinquishMagickMemory(runlength); offsets=(ssize_t *) RelinquishMagickMemory(offsets); } /* Initialize image structure. */ image->alpha_trait=iris_info.depth == 4 ? BlendPixelTrait : UndefinedPixelTrait; image->columns=iris_info.columns; image->rows=iris_info.rows; /* Convert SGI raster image to pixel packets. */ if (image->storage_class == DirectClass) { /* Convert SGI image to DirectClass pixel packets. 
*/ if (bytes_per_pixel == 2) { for (y=0; y < (ssize_t) image->rows; y++) { p=pixels+(image->rows-y-1)*8*image->columns; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleShortToQuantum((unsigned short) ((*(p+0) << 8) | (*(p+1)))),q); SetPixelGreen(image,ScaleShortToQuantum((unsigned short) ((*(p+2) << 8) | (*(p+3)))),q); SetPixelBlue(image,ScaleShortToQuantum((unsigned short) ((*(p+4) << 8) | (*(p+5)))),q); SetPixelAlpha(image,OpaqueAlpha,q); if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ScaleShortToQuantum((unsigned short) ((*(p+6) << 8) | (*(p+7)))),q); p+=8; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } else for (y=0; y < (ssize_t) image->rows; y++) { p=pixels+(image->rows-y-1)*4*image->columns; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*p),q); SetPixelGreen(image,ScaleCharToQuantum(*(p+1)),q); SetPixelBlue(image,ScaleCharToQuantum(*(p+2)),q); SetPixelAlpha(image,OpaqueAlpha,q); if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ScaleCharToQuantum(*(p+3)),q); p+=4; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else { /* Create grayscale map. */ if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Convert SGI image to PseudoClass pixel packets. */ if (bytes_per_pixel == 2) { for (y=0; y < (ssize_t) image->rows; y++) { p=pixels+(image->rows-y-1)*8*image->columns; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { quantum=(*p << 8); quantum|=(*(p+1)); SetPixelIndex(image,(Quantum) quantum,q); p+=8; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } else for (y=0; y < (ssize_t) image->rows; y++) { p=pixels+(image->rows-y-1)*4*image->columns; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,*p,q); p+=4; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } (void) SyncImage(image,exception); } pixel_info=RelinquishVirtualMemory(pixel_info); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; iris_info.magic=ReadBlobMSBShort(image); if (iris_info.magic == 0x01DA) { /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while (iris_info.magic == 0x01DA); (void) CloseBlob(image); return(GetFirstImageInList(image)); } Commit Message: Prevent buffer overflow in BMP & SGI coders (bug report from pwchen&rayzhong of tencent) CWE ID: CWE-125
static Image *ReadSGIImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType status; MagickSizeType number_pixels; MemoryInfo *pixel_info; register Quantum *q; register ssize_t i, x; register unsigned char *p; SGIInfo iris_info; size_t bytes_per_pixel, quantum; ssize_t count, y, z; unsigned char *pixels; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read SGI raster header. */ iris_info.magic=ReadBlobMSBShort(image); do { /* Verify SGI identifier. */ if (iris_info.magic != 0x01DA) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); iris_info.storage=(unsigned char) ReadBlobByte(image); switch (iris_info.storage) { case 0x00: image->compression=NoCompression; break; case 0x01: image->compression=RLECompression; break; default: ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } iris_info.bytes_per_pixel=(unsigned char) ReadBlobByte(image); if ((iris_info.bytes_per_pixel == 0) || (iris_info.bytes_per_pixel > 2)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); iris_info.dimension=ReadBlobMSBShort(image); iris_info.columns=ReadBlobMSBShort(image); iris_info.rows=ReadBlobMSBShort(image); iris_info.depth=ReadBlobMSBShort(image); if ((iris_info.depth == 0) || (iris_info.depth > 4)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); iris_info.minimum_value=ReadBlobMSBLong(image); iris_info.maximum_value=ReadBlobMSBLong(image); iris_info.sans=ReadBlobMSBLong(image); (void) ReadBlob(image,sizeof(iris_info.name),(unsigned char *) iris_info.name); iris_info.name[sizeof(iris_info.name)-1]='\0'; if (*iris_info.name != '\0') (void) SetImageProperty(image,"label",iris_info.name,exception); iris_info.pixel_format=ReadBlobMSBLong(image); if (iris_info.pixel_format != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); count=ReadBlob(image,sizeof(iris_info.filler),iris_info.filler); (void) count; image->columns=iris_info.columns; image->rows=iris_info.rows; image->depth=(size_t) MagickMin(iris_info.depth,MAGICKCORE_QUANTUM_DEPTH); if (iris_info.pixel_format == 0) image->depth=(size_t) MagickMin((size_t) 8*iris_info.bytes_per_pixel, MAGICKCORE_QUANTUM_DEPTH); if (iris_info.depth < 3) { image->storage_class=PseudoClass; image->colors=iris_info.bytes_per_pixel > 1 ? 65535 : 256; } if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Allocate SGI pixels. 
*/ bytes_per_pixel=(size_t) iris_info.bytes_per_pixel; number_pixels=(MagickSizeType) iris_info.columns*iris_info.rows; if ((4*bytes_per_pixel*number_pixels) != ((MagickSizeType) (size_t) (4*bytes_per_pixel*number_pixels))) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixel_info=AcquireVirtualMemory(iris_info.columns,iris_info.rows*4* bytes_per_pixel*sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); if ((int) iris_info.storage != 0x01) { unsigned char *scanline; /* Read standard image format. */ scanline=(unsigned char *) AcquireQuantumMemory(iris_info.columns, bytes_per_pixel*sizeof(*scanline)); if (scanline == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (z=0; z < (ssize_t) iris_info.depth; z++) { p=pixels+bytes_per_pixel*z; for (y=0; y < (ssize_t) iris_info.rows; y++) { count=ReadBlob(image,bytes_per_pixel*iris_info.columns,scanline); if (EOFBlob(image) != MagickFalse) break; if (bytes_per_pixel == 2) for (x=0; x < (ssize_t) iris_info.columns; x++) { *p=scanline[2*x]; *(p+1)=scanline[2*x+1]; p+=8; } else for (x=0; x < (ssize_t) iris_info.columns; x++) { *p=scanline[x]; p+=4; } } } scanline=(unsigned char *) RelinquishMagickMemory(scanline); } else { MemoryInfo *packet_info; size_t *runlength; ssize_t offset, *offsets; unsigned char *packets; unsigned int data_order; /* Read runlength-encoded image format. */ offsets=(ssize_t *) AcquireQuantumMemory((size_t) iris_info.rows, iris_info.depth*sizeof(*offsets)); runlength=(size_t *) AcquireQuantumMemory(iris_info.rows, iris_info.depth*sizeof(*runlength)); packet_info=AcquireVirtualMemory((size_t) iris_info.columns+10UL,4UL* sizeof(*packets)); if ((offsets == (ssize_t *) NULL) || (runlength == (size_t *) NULL) || (packet_info == (MemoryInfo *) NULL)) { if (offsets == (ssize_t *) NULL) offsets=(ssize_t *) RelinquishMagickMemory(offsets); if (runlength == (size_t *) NULL) runlength=(size_t *) RelinquishMagickMemory(runlength); if (packet_info == (MemoryInfo *) NULL) packet_info=RelinquishVirtualMemory(packet_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } packets=(unsigned char *) GetVirtualMemoryBlob(packet_info); for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++) offsets[i]=ReadBlobMSBSignedLong(image); for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++) { runlength[i]=ReadBlobMSBLong(image); if (runlength[i] > (4*(size_t) iris_info.columns+10)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } /* Check data order. 
*/ offset=0; data_order=0; for (y=0; ((y < (ssize_t) iris_info.rows) && (data_order == 0)); y++) for (z=0; ((z < (ssize_t) iris_info.depth) && (data_order == 0)); z++) { if (offsets[y+z*iris_info.rows] < offset) data_order=1; offset=offsets[y+z*iris_info.rows]; } offset=(ssize_t) TellBlob(image); if (data_order == 1) { for (z=0; z < (ssize_t) iris_info.depth; z++) { p=pixels; for (y=0; y < (ssize_t) iris_info.rows; y++) { if (offset != offsets[y+z*iris_info.rows]) { offset=offsets[y+z*iris_info.rows]; offset=(ssize_t) SeekBlob(image,(ssize_t) offset,SEEK_SET); } count=ReadBlob(image,(size_t) runlength[y+z*iris_info.rows], packets); if (EOFBlob(image) != MagickFalse) break; offset+=(ssize_t) runlength[y+z*iris_info.rows]; status=SGIDecode(bytes_per_pixel,(ssize_t) (runlength[y+z*iris_info.rows]/bytes_per_pixel),packets, 1L*iris_info.columns,p+bytes_per_pixel*z); if (status == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); p+=(iris_info.columns*4*bytes_per_pixel); } } } else { MagickOffsetType position; position=TellBlob(image); p=pixels; for (y=0; y < (ssize_t) iris_info.rows; y++) { for (z=0; z < (ssize_t) iris_info.depth; z++) { if (offset != offsets[y+z*iris_info.rows]) { offset=offsets[y+z*iris_info.rows]; offset=(ssize_t) SeekBlob(image,(ssize_t) offset,SEEK_SET); } count=ReadBlob(image,(size_t) runlength[y+z*iris_info.rows], packets); if (EOFBlob(image) != MagickFalse) break; offset+=(ssize_t) runlength[y+z*iris_info.rows]; status=SGIDecode(bytes_per_pixel,(ssize_t) (runlength[y+z*iris_info.rows]/bytes_per_pixel),packets, 1L*iris_info.columns,p+bytes_per_pixel*z); if (status == MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } p+=(iris_info.columns*4*bytes_per_pixel); } offset=(ssize_t) SeekBlob(image,position,SEEK_SET); } packet_info=RelinquishVirtualMemory(packet_info); runlength=(size_t *) RelinquishMagickMemory(runlength); offsets=(ssize_t *) RelinquishMagickMemory(offsets); } /* Initialize image structure. */ image->alpha_trait=iris_info.depth == 4 ? BlendPixelTrait : UndefinedPixelTrait; image->columns=iris_info.columns; image->rows=iris_info.rows; /* Convert SGI raster image to pixel packets. */ if (image->storage_class == DirectClass) { /* Convert SGI image to DirectClass pixel packets. 
*/ if (bytes_per_pixel == 2) { for (y=0; y < (ssize_t) image->rows; y++) { p=pixels+(image->rows-y-1)*8*image->columns; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleShortToQuantum((unsigned short) ((*(p+0) << 8) | (*(p+1)))),q); SetPixelGreen(image,ScaleShortToQuantum((unsigned short) ((*(p+2) << 8) | (*(p+3)))),q); SetPixelBlue(image,ScaleShortToQuantum((unsigned short) ((*(p+4) << 8) | (*(p+5)))),q); SetPixelAlpha(image,OpaqueAlpha,q); if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ScaleShortToQuantum((unsigned short) ((*(p+6) << 8) | (*(p+7)))),q); p+=8; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } else for (y=0; y < (ssize_t) image->rows; y++) { p=pixels+(image->rows-y-1)*4*image->columns; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*p),q); SetPixelGreen(image,ScaleCharToQuantum(*(p+1)),q); SetPixelBlue(image,ScaleCharToQuantum(*(p+2)),q); SetPixelAlpha(image,OpaqueAlpha,q); if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,ScaleCharToQuantum(*(p+3)),q); p+=4; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else { /* Create grayscale map. */ if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Convert SGI image to PseudoClass pixel packets. */ if (bytes_per_pixel == 2) { for (y=0; y < (ssize_t) image->rows; y++) { p=pixels+(image->rows-y-1)*8*image->columns; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { quantum=(*p << 8); quantum|=(*(p+1)); SetPixelIndex(image,(Quantum) quantum,q); p+=8; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } else for (y=0; y < (ssize_t) image->rows; y++) { p=pixels+(image->rows-y-1)*4*image->columns; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,*p,q); p+=4; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } (void) SyncImage(image,exception); } pixel_info=RelinquishVirtualMemory(pixel_info); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; iris_info.magic=ReadBlobMSBShort(image); if (iris_info.magic == 0x01DA) { /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while (iris_info.magic == 0x01DA); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
168,815
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: bool TypedUrlModelAssociator::AssociateModels() { VLOG(1) << "Associating TypedUrl Models"; DCHECK(expected_loop_ == MessageLoop::current()); std::vector<history::URLRow> typed_urls; if (!history_backend_->GetAllTypedURLs(&typed_urls)) { LOG(ERROR) << "Could not get the typed_url entries."; return false; } std::map<history::URLID, history::VisitVector> visit_vectors; for (std::vector<history::URLRow>::iterator ix = typed_urls.begin(); ix != typed_urls.end(); ++ix) { if (!history_backend_->GetVisitsForURL(ix->id(), &(visit_vectors[ix->id()]))) { LOG(ERROR) << "Could not get the url's visits."; return false; } if (visit_vectors[ix->id()].empty()) { history::VisitRow visit( ix->id(), ix->last_visit(), 0, PageTransition::TYPED, 0); visit_vectors[ix->id()].push_back(visit); } } TypedUrlTitleVector titles; TypedUrlVector new_urls; TypedUrlVisitVector new_visits; TypedUrlUpdateVector updated_urls; { sync_api::WriteTransaction trans(sync_service_->GetUserShare()); sync_api::ReadNode typed_url_root(&trans); if (!typed_url_root.InitByTagLookup(kTypedUrlTag)) { LOG(ERROR) << "Server did not create the top-level typed_url node. We " << "might be running against an out-of-date server."; return false; } std::set<std::string> current_urls; for (std::vector<history::URLRow>::iterator ix = typed_urls.begin(); ix != typed_urls.end(); ++ix) { std::string tag = ix->url().spec(); history::VisitVector& visits = visit_vectors[ix->id()]; sync_api::ReadNode node(&trans); if (node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) { const sync_pb::TypedUrlSpecifics& typed_url( node.GetTypedUrlSpecifics()); DCHECK_EQ(tag, typed_url.url()); history::URLRow new_url(*ix); std::vector<history::VisitInfo> added_visits; int difference = MergeUrls(typed_url, *ix, &visits, &new_url, &added_visits); if (difference & DIFF_UPDATE_NODE) { sync_api::WriteNode write_node(&trans); if (!write_node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) { LOG(ERROR) << "Failed to edit typed_url sync node."; return false; } if (typed_url.visits_size() > 0) { base::Time earliest_visit = base::Time::FromInternalValue(typed_url.visits(0)); for (history::VisitVector::iterator it = visits.begin(); it != visits.end() && it->visit_time < earliest_visit; ) { it = visits.erase(it); } DCHECK(visits.size() > 0); } else { NOTREACHED() << "Syncing typed URL with no visits: " << typed_url.url(); } WriteToSyncNode(new_url, visits, &write_node); } if (difference & DIFF_LOCAL_TITLE_CHANGED) { titles.push_back(std::pair<GURL, string16>(new_url.url(), new_url.title())); } if (difference & DIFF_LOCAL_ROW_CHANGED) { updated_urls.push_back( std::pair<history::URLID, history::URLRow>(ix->id(), new_url)); } if (difference & DIFF_LOCAL_VISITS_ADDED) { new_visits.push_back( std::pair<GURL, std::vector<history::VisitInfo> >(ix->url(), added_visits)); } Associate(&tag, node.GetId()); } else { sync_api::WriteNode node(&trans); if (!node.InitUniqueByCreation(syncable::TYPED_URLS, typed_url_root, tag)) { LOG(ERROR) << "Failed to create typed_url sync node."; return false; } node.SetTitle(UTF8ToWide(tag)); WriteToSyncNode(*ix, visits, &node); Associate(&tag, node.GetId()); } current_urls.insert(tag); } int64 sync_child_id = typed_url_root.GetFirstChildId(); while (sync_child_id != sync_api::kInvalidId) { sync_api::ReadNode sync_child_node(&trans); if (!sync_child_node.InitByIdLookup(sync_child_id)) { LOG(ERROR) << "Failed to fetch child node."; return false; } const sync_pb::TypedUrlSpecifics& typed_url( sync_child_node.GetTypedUrlSpecifics()); if 
(current_urls.find(typed_url.url()) == current_urls.end()) { new_visits.push_back( std::pair<GURL, std::vector<history::VisitInfo> >( GURL(typed_url.url()), std::vector<history::VisitInfo>())); std::vector<history::VisitInfo>& visits = new_visits.back().second; history::URLRow new_url(GURL(typed_url.url())); TypedUrlModelAssociator::UpdateURLRowFromTypedUrlSpecifics( typed_url, &new_url); for (int c = 0; c < typed_url.visits_size(); ++c) { DCHECK(c == 0 || typed_url.visits(c) > typed_url.visits(c - 1)); DCHECK_LE(typed_url.visit_transitions(c), static_cast<int>(PageTransition::LAST_CORE)); visits.push_back(history::VisitInfo( base::Time::FromInternalValue(typed_url.visits(c)), static_cast<PageTransition::Type>( typed_url.visit_transitions(c)))); } Associate(&typed_url.url(), sync_child_node.GetId()); new_urls.push_back(new_url); } sync_child_id = sync_child_node.GetSuccessorId(); } } return WriteToHistoryBackend(&titles, &new_urls, &updated_urls, &new_visits, NULL); } Commit Message: Now ignores obsolete sync nodes without visit transitions. Also removed assertion that was erroneously triggered by obsolete sync nodes. BUG=none TEST=run chrome against a database that contains obsolete typed url sync nodes. Review URL: http://codereview.chromium.org/7129069 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@88846 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
bool TypedUrlModelAssociator::AssociateModels() { VLOG(1) << "Associating TypedUrl Models"; DCHECK(expected_loop_ == MessageLoop::current()); std::vector<history::URLRow> typed_urls; if (!history_backend_->GetAllTypedURLs(&typed_urls)) { LOG(ERROR) << "Could not get the typed_url entries."; return false; } std::map<history::URLID, history::VisitVector> visit_vectors; for (std::vector<history::URLRow>::iterator ix = typed_urls.begin(); ix != typed_urls.end(); ++ix) { if (!history_backend_->GetVisitsForURL(ix->id(), &(visit_vectors[ix->id()]))) { LOG(ERROR) << "Could not get the url's visits."; return false; } if (visit_vectors[ix->id()].empty()) { history::VisitRow visit( ix->id(), ix->last_visit(), 0, PageTransition::TYPED, 0); visit_vectors[ix->id()].push_back(visit); } } TypedUrlTitleVector titles; TypedUrlVector new_urls; TypedUrlVisitVector new_visits; TypedUrlUpdateVector updated_urls; { sync_api::WriteTransaction trans(sync_service_->GetUserShare()); sync_api::ReadNode typed_url_root(&trans); if (!typed_url_root.InitByTagLookup(kTypedUrlTag)) { LOG(ERROR) << "Server did not create the top-level typed_url node. We " << "might be running against an out-of-date server."; return false; } std::set<std::string> current_urls; for (std::vector<history::URLRow>::iterator ix = typed_urls.begin(); ix != typed_urls.end(); ++ix) { std::string tag = ix->url().spec(); history::VisitVector& visits = visit_vectors[ix->id()]; sync_api::ReadNode node(&trans); if (node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) { const sync_pb::TypedUrlSpecifics& typed_url( node.GetTypedUrlSpecifics()); DCHECK_EQ(tag, typed_url.url()); history::URLRow new_url(*ix); std::vector<history::VisitInfo> added_visits; int difference = MergeUrls(typed_url, *ix, &visits, &new_url, &added_visits); if (difference & DIFF_UPDATE_NODE) { sync_api::WriteNode write_node(&trans); if (!write_node.InitByClientTagLookup(syncable::TYPED_URLS, tag)) { LOG(ERROR) << "Failed to edit typed_url sync node."; return false; } if (typed_url.visits_size() > 0) { base::Time earliest_visit = base::Time::FromInternalValue(typed_url.visits(0)); for (history::VisitVector::iterator it = visits.begin(); it != visits.end() && it->visit_time < earliest_visit; ) { it = visits.erase(it); } DCHECK(visits.size() > 0); } WriteToSyncNode(new_url, visits, &write_node); } if (difference & DIFF_LOCAL_TITLE_CHANGED) { titles.push_back(std::pair<GURL, string16>(new_url.url(), new_url.title())); } if (difference & DIFF_LOCAL_ROW_CHANGED) { updated_urls.push_back( std::pair<history::URLID, history::URLRow>(ix->id(), new_url)); } if (difference & DIFF_LOCAL_VISITS_ADDED) { new_visits.push_back( std::pair<GURL, std::vector<history::VisitInfo> >(ix->url(), added_visits)); } Associate(&tag, node.GetId()); } else { sync_api::WriteNode node(&trans); if (!node.InitUniqueByCreation(syncable::TYPED_URLS, typed_url_root, tag)) { LOG(ERROR) << "Failed to create typed_url sync node."; return false; } node.SetTitle(UTF8ToWide(tag)); WriteToSyncNode(*ix, visits, &node); Associate(&tag, node.GetId()); } current_urls.insert(tag); } int64 sync_child_id = typed_url_root.GetFirstChildId(); while (sync_child_id != sync_api::kInvalidId) { sync_api::ReadNode sync_child_node(&trans); if (!sync_child_node.InitByIdLookup(sync_child_id)) { LOG(ERROR) << "Failed to fetch child node."; return false; } const sync_pb::TypedUrlSpecifics& typed_url( sync_child_node.GetTypedUrlSpecifics()); sync_child_id = sync_child_node.GetSuccessorId(); // Ignore old sync nodes that don't have any transition 
data stored with // them. if (typed_url.visit_transitions_size() == 0) { VLOG(1) << "Ignoring obsolete sync node with no visit transition info."; continue; } if (current_urls.find(typed_url.url()) == current_urls.end()) { new_visits.push_back( std::pair<GURL, std::vector<history::VisitInfo> >( GURL(typed_url.url()), std::vector<history::VisitInfo>())); std::vector<history::VisitInfo>& visits = new_visits.back().second; history::URLRow new_url(GURL(typed_url.url())); TypedUrlModelAssociator::UpdateURLRowFromTypedUrlSpecifics( typed_url, &new_url); for (int c = 0; c < typed_url.visits_size(); ++c) { DCHECK(c == 0 || typed_url.visits(c) > typed_url.visits(c - 1)); DCHECK_LE(typed_url.visit_transitions(c), static_cast<int>(PageTransition::LAST_CORE)); visits.push_back(history::VisitInfo( base::Time::FromInternalValue(typed_url.visits(c)), static_cast<PageTransition::Type>( typed_url.visit_transitions(c)))); } Associate(&typed_url.url(), sync_child_node.GetId()); new_urls.push_back(new_url); } } } return WriteToHistoryBackend(&titles, &new_urls, &updated_urls, &new_visits, NULL); }
170,472
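For readers comparing the two versions in the row above: the core of the CWE-399 fix is a guard that skips obsolete sync nodes carrying no visit transition data (visit_transitions_size() == 0) before any per-visit indexing happens, instead of relying on a DCHECK. The fragment below is a minimal, self-contained sketch of that validate-then-skip shape; SyncRecord and ProcessRecords are illustrative names, not Chromium code.

#include <cstdio>
#include <vector>

// Illustrative record type standing in for a typed-url sync node; not a Chromium type.
struct SyncRecord {
    std::vector<long long> visits;
    std::vector<int> visit_transitions;
};

// Skip records whose transition data is missing instead of asserting on them,
// mirroring the shape of the guard added in the fixed function above.
void ProcessRecords(const std::vector<SyncRecord>& records) {
    for (const SyncRecord& record : records) {
        if (record.visit_transitions.empty()) {
            std::puts("Ignoring obsolete record with no visit transition info.");
            continue;  // never index visit_transitions for this record
        }
        // ... per-visit processing of this record would go here ...
    }
}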
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code:
PassRefPtr<Attr> Element::setAttributeNode(Attr* attrNode, ExceptionCode& ec)
{
    if (!attrNode) {
        ec = TYPE_MISMATCH_ERR;
        return 0;
    }

    RefPtr<Attr> oldAttrNode = attrIfExists(attrNode->qualifiedName());
    if (oldAttrNode.get() == attrNode)
        return attrNode; // This Attr is already attached to the element.

    if (attrNode->ownerElement()) {
        ec = INUSE_ATTRIBUTE_ERR;
        return 0;
    }

    synchronizeAllAttributes();
    UniqueElementData* elementData = ensureUniqueElementData();

    size_t index = elementData->getAttributeItemIndex(attrNode->qualifiedName());
    if (index != notFound) {
        if (oldAttrNode)
            detachAttrNodeFromElementWithValue(oldAttrNode.get(), elementData->attributeItem(index)->value());
        else
            oldAttrNode = Attr::create(document(), attrNode->qualifiedName(), elementData->attributeItem(index)->value());
    }

    setAttributeInternal(index, attrNode->qualifiedName(), attrNode->value(), NotInSynchronizationOfLazyAttribute);

    attrNode->attachToElement(this);
    ensureAttrNodeListForElement(this)->append(attrNode);

    return oldAttrNode.release();
}

Commit Message: Set Attr.ownerDocument in Element#setAttributeNode()

Attr objects can move across documents by setAttributeNode(). So It needs to reset ownerDocument through TreeScopeAdoptr::adoptIfNeeded().

BUG=248950
TEST=set-attribute-node-from-iframe.html

Review URL: https://chromiumcodereview.appspot.com/17583003

git-svn-id: svn://svn.chromium.org/blink/trunk@152938 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-399
PassRefPtr<Attr> Element::setAttributeNode(Attr* attrNode, ExceptionCode& ec)
{
    if (!attrNode) {
        ec = TYPE_MISMATCH_ERR;
        return 0;
    }

    RefPtr<Attr> oldAttrNode = attrIfExists(attrNode->qualifiedName());
    if (oldAttrNode.get() == attrNode)
        return attrNode; // This Attr is already attached to the element.

    if (attrNode->ownerElement()) {
        ec = INUSE_ATTRIBUTE_ERR;
        return 0;
    }

    synchronizeAllAttributes();
    UniqueElementData* elementData = ensureUniqueElementData();

    size_t index = elementData->getAttributeItemIndex(attrNode->qualifiedName());
    if (index != notFound) {
        if (oldAttrNode)
            detachAttrNodeFromElementWithValue(oldAttrNode.get(), elementData->attributeItem(index)->value());
        else
            oldAttrNode = Attr::create(document(), attrNode->qualifiedName(), elementData->attributeItem(index)->value());
    }

    setAttributeInternal(index, attrNode->qualifiedName(), attrNode->value(), NotInSynchronizationOfLazyAttribute);

    attrNode->attachToElement(this);
    treeScope()->adoptIfNeeded(attrNode);
    ensureAttrNodeListForElement(this)->append(attrNode);

    return oldAttrNode.release();
}
171,207
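A note on the one-line nature of this fix: the only functional change between the two functions is the treeScope()->adoptIfNeeded(attrNode) call, which re-homes an Attr created in another document before it is stored on the element. The sketch below shows the general adopt-on-attach idea with made-up types (Document, Node, Container); it is not Blink code, and the only real API names are the ones quoted in the row above.

// Illustrative adopt-on-attach sketch (hypothetical types, not the Blink API):
// when an object created under one owner is attached to a container that
// belongs to a different owner, update its owner pointer at attach time so
// later lookups never see a stale document.
struct Document {};

struct Node {
    Document* owner_document = nullptr;
};

struct Container {
    Document* document = nullptr;

    void Attach(Node* node) {
        if (node->owner_document != document)
            node->owner_document = document;  // analogous role to adoptIfNeeded()
        // ... append node to the container's internal list ...
    }
};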
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static Image *ReadWPGImage(const ImageInfo *image_info, ExceptionInfo *exception) { typedef struct { size_t FileId; MagickOffsetType DataOffset; unsigned int ProductType; unsigned int FileType; unsigned char MajorVersion; unsigned char MinorVersion; unsigned int EncryptKey; unsigned int Reserved; } WPGHeader; typedef struct { unsigned char RecType; size_t RecordLength; } WPGRecord; typedef struct { unsigned char Class; unsigned char RecType; size_t Extension; size_t RecordLength; } WPG2Record; typedef struct { unsigned HorizontalUnits; unsigned VerticalUnits; unsigned char PosSizePrecision; } WPG2Start; typedef struct { unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType1; typedef struct { unsigned int Width; unsigned int Height; unsigned char Depth; unsigned char Compression; } WPG2BitmapType1; typedef struct { unsigned int RotAngle; unsigned int LowLeftX; unsigned int LowLeftY; unsigned int UpRightX; unsigned int UpRightY; unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType2; typedef struct { unsigned int StartIndex; unsigned int NumOfEntries; } WPGColorMapRec; /* typedef struct { size_t PS_unknown1; unsigned int PS_unknown2; unsigned int PS_unknown3; } WPGPSl1Record; */ Image *image; unsigned int status; WPGHeader Header; WPGRecord Rec; WPG2Record Rec2; WPG2Start StartWPG; WPGBitmapType1 BitmapHeader1; WPG2BitmapType1 Bitmap2Header1; WPGBitmapType2 BitmapHeader2; WPGColorMapRec WPG_Palette; int i, bpp, WPG2Flags; ssize_t ldblk; size_t one; unsigned char *BImgBuff; tCTM CTM; /*current transform matrix*/ /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); one=1; image=AcquireImage(image_info); image->depth=8; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read WPG image. 
*/ Header.FileId=ReadBlobLSBLong(image); Header.DataOffset=(MagickOffsetType) ReadBlobLSBLong(image); Header.ProductType=ReadBlobLSBShort(image); Header.FileType=ReadBlobLSBShort(image); Header.MajorVersion=ReadBlobByte(image); Header.MinorVersion=ReadBlobByte(image); Header.EncryptKey=ReadBlobLSBShort(image); Header.Reserved=ReadBlobLSBShort(image); if (Header.FileId!=0x435057FF || (Header.ProductType>>8)!=0x16) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (Header.EncryptKey!=0) ThrowReaderException(CoderError,"EncryptedWPGImageFileNotSupported"); image->columns = 1; image->rows = 1; image->colors = 0; bpp=0; BitmapHeader2.RotAngle=0; switch(Header.FileType) { case 1: /* WPG level 1 */ while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec.RecordLength); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec.RecordLength; switch(Rec.RecType) { case 0x0B: /* bitmap type 1 */ BitmapHeader1.Width=ReadBlobLSBShort(image); BitmapHeader1.Height=ReadBlobLSBShort(image); if ((BitmapHeader1.Width == 0) || (BitmapHeader1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader1.Depth=ReadBlobLSBShort(image); BitmapHeader1.HorzRes=ReadBlobLSBShort(image); BitmapHeader1.VertRes=ReadBlobLSBShort(image); if(BitmapHeader1.HorzRes && BitmapHeader1.VertRes) { image->units=PixelsPerCentimeterResolution; image->x_resolution=BitmapHeader1.HorzRes/470.0; image->y_resolution=BitmapHeader1.VertRes/470.0; } image->columns=BitmapHeader1.Width; image->rows=BitmapHeader1.Height; bpp=BitmapHeader1.Depth; goto UnpackRaster; case 0x0E: /*Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); image->colors=WPG_Palette.NumOfEntries; if (!AcquireImageColormap(image,image->colors)) goto NoMemory; for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); } break; case 0x11: /* Start PS l1 */ if(Rec.RecordLength > 8) image=ExtractPostscript(image,image_info, TellBlob(image)+8, /* skip PS header in the wpg */ (ssize_t) Rec.RecordLength-8,exception); break; case 0x14: /* bitmap type 2 */ BitmapHeader2.RotAngle=ReadBlobLSBShort(image); BitmapHeader2.LowLeftX=ReadBlobLSBShort(image); BitmapHeader2.LowLeftY=ReadBlobLSBShort(image); BitmapHeader2.UpRightX=ReadBlobLSBShort(image); BitmapHeader2.UpRightY=ReadBlobLSBShort(image); BitmapHeader2.Width=ReadBlobLSBShort(image); BitmapHeader2.Height=ReadBlobLSBShort(image); if ((BitmapHeader2.Width == 0) || (BitmapHeader2.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader2.Depth=ReadBlobLSBShort(image); BitmapHeader2.HorzRes=ReadBlobLSBShort(image); BitmapHeader2.VertRes=ReadBlobLSBShort(image); image->units=PixelsPerCentimeterResolution; image->page.width=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightX)/470.0); image->page.height=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightY)/470.0); image->page.x=(int) (BitmapHeader2.LowLeftX/470.0); image->page.y=(int) (BitmapHeader2.LowLeftX/470.0); if(BitmapHeader2.HorzRes && BitmapHeader2.VertRes) { image->x_resolution=BitmapHeader2.HorzRes/470.0; 
image->y_resolution=BitmapHeader2.VertRes/470.0; } image->columns=BitmapHeader2.Width; image->rows=BitmapHeader2.Height; bpp=BitmapHeader2.Depth; UnpackRaster: if ((image->colors == 0) && (bpp != 24)) { image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors)) { NoMemory: ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } /* printf("Load default colormap \n"); */ for (i=0; (i < (int) image->colors) && (i < 256); i++) { image->colormap[i].red=ScaleCharToQuantum(WPG1_Palette[i].Red); image->colormap[i].green=ScaleCharToQuantum(WPG1_Palette[i].Green); image->colormap[i].blue=ScaleCharToQuantum(WPG1_Palette[i].Blue); } } else { if (bpp < 24) if ( (image->colors < (one << bpp)) && (bpp != 24) ) image->colormap=(PixelPacket *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } if (bpp == 1) { if(image->colormap[0].red==0 && image->colormap[0].green==0 && image->colormap[0].blue==0 && image->colormap[1].red==0 && image->colormap[1].green==0 && image->colormap[1].blue==0) { /* fix crippled monochrome palette */ image->colormap[1].red = image->colormap[1].green = image->colormap[1].blue = QuantumRange; } } if(UnpackWPGRaster(image,bpp) < 0) /* The raster cannot be unpacked */ { DecompressionFailed: ThrowReaderException(CoderError,"UnableToDecompressImage"); } if(Rec.RecType==0x14 && BitmapHeader2.RotAngle!=0 && !image_info->ping) { /* flop command */ if(BitmapHeader2.RotAngle & 0x8000) { Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); (void) RemoveLastImageFromList(&image); AppendImageToList(&image,flop_image); } } /* flip command */ if(BitmapHeader2.RotAngle & 0x2000) { Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); (void) RemoveLastImageFromList(&image); AppendImageToList(&image,flip_image); } } /* rotate command */ if(BitmapHeader2.RotAngle & 0x0FFF) { Image *rotate_image; rotate_image=RotateImage(image,(BitmapHeader2.RotAngle & 0x0FFF), exception); if (rotate_image != (Image *) NULL) { DuplicateBlob(rotate_image,image); (void) RemoveLastImageFromList(&image); AppendImageToList(&image,rotate_image); } } } /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=0; image->colors=0; break; case 0x1B: /* Postscript l2 */ if(Rec.RecordLength>0x3C) image=ExtractPostscript(image,image_info, TellBlob(image)+0x3C, /* skip PS l2 header in the wpg */ (ssize_t) Rec.RecordLength-0x3C,exception); break; } } break; case 2: /* WPG level 2 */ (void) memset(CTM,0,sizeof(CTM)); StartWPG.PosSizePrecision = 0; while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec2.Class=(i=ReadBlobByte(image)); if(i==EOF) break; Rec2.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec2.Extension); Rd_WP_DWORD(image,&Rec2.RecordLength); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec2.RecordLength; switch(Rec2.RecType) { case 1: StartWPG.HorizontalUnits=ReadBlobLSBShort(image); StartWPG.VerticalUnits=ReadBlobLSBShort(image); StartWPG.PosSizePrecision=ReadBlobByte(image); break; case 0x0C: /* Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); image->colors=WPG_Palette.NumOfEntries; if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((char) ReadBlobByte(image)); (void) ReadBlobByte(image); /*Opacity??*/ } break; case 0x0E: Bitmap2Header1.Width=ReadBlobLSBShort(image); Bitmap2Header1.Height=ReadBlobLSBShort(image); if ((Bitmap2Header1.Width == 0) || (Bitmap2Header1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); Bitmap2Header1.Depth=ReadBlobByte(image); Bitmap2Header1.Compression=ReadBlobByte(image); if(Bitmap2Header1.Compression > 1) continue; /*Unknown compression method */ switch(Bitmap2Header1.Depth) { case 1: bpp=1; break; case 2: bpp=2; break; case 3: bpp=4; break; case 4: bpp=8; break; case 8: bpp=24; break; default: continue; /*Ignore raster with unknown depth*/ } image->columns=Bitmap2Header1.Width; image->rows=Bitmap2Header1.Height; if ((image->colors == 0) && (bpp != 24)) { size_t one; one=1; image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors)) goto NoMemory; } else { if(bpp < 24) if( image->colors<(one << bpp) && bpp!=24 ) image->colormap=(PixelPacket *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } switch(Bitmap2Header1.Compression) { case 0: /*Uncompressed raster*/ { ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk,sizeof(*BImgBuff)); if (BImgBuff == (unsigned char *) NULL) goto NoMemory; for(i=0; i< (ssize_t) image->rows; i++) { (void) ReadBlob(image,ldblk,BImgBuff); InsertRow(BImgBuff,i,image,bpp); } if(BImgBuff) BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);; break; } case 1: /*RLE for WPG2 */ { if( UnpackWPG2Raster(image,bpp) < 0) goto DecompressionFailed; break; } } if(CTM[0][0]<0 && !image_info->ping) { /*?? 
RotAngle=360-RotAngle;*/ Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); (void) RemoveLastImageFromList(&image); AppendImageToList(&image,flop_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. Tx(0,0)=-1; Tx(1,0)=0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=1; Tx(2,1)=0; Tx(0,2)=(WPG._2Rect.X_ur+WPG._2Rect.X_ll); Tx(1,2)=0; Tx(2,2)=1; */ } if(CTM[1][1]<0 && !image_info->ping) { /*?? RotAngle=360-RotAngle;*/ Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); (void) RemoveLastImageFromList(&image); AppendImageToList(&image,flip_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. float_matrix Tx(3,3); Tx(0,0)= 1; Tx(1,0)= 0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=-1; Tx(2,1)=0; Tx(0,2)= 0; Tx(1,2)=(WPG._2Rect.Y_ur+WPG._2Rect.Y_ll); Tx(2,2)=1; */ } /* Allocate next image structure. */ AcquireNextImage(image_info,image); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x12: /* Postscript WPG2*/ i=ReadBlobLSBShort(image); if(Rec2.RecordLength > (unsigned int) i) image=ExtractPostscript(image,image_info, TellBlob(image)+i, /*skip PS header in the wpg2*/ (ssize_t) (Rec2.RecordLength-i-2),exception); break; case 0x1B: /*bitmap rectangle*/ WPG2Flags = LoadWPG2Flags(image,StartWPG.PosSizePrecision,NULL,&CTM); (void) WPG2Flags; break; } } break; default: { ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); } } Finish: (void) CloseBlob(image); { Image *p; ssize_t scene=0; /* Rewind list, removing any empty images while rewinding. */ p=image; image=NULL; while (p != (Image *) NULL) { Image *tmp=p; if ((p->rows == 0) || (p->columns == 0)) { p=p->previous; DeleteImageFromList(&tmp); } else { image=p; p=p->previous; } } /* Fix scene numbers. */ for (p=image; p != (Image *) NULL; p=p->next) p->scene=(size_t) scene++; } if (image == (Image *) NULL) ThrowReaderException(CorruptImageError, "ImageFileDoesNotContainAnyImageData"); return(image); } Commit Message: CWE ID: CWE-119
static Image *ReadWPGImage(const ImageInfo *image_info, ExceptionInfo *exception) { typedef struct { size_t FileId; MagickOffsetType DataOffset; unsigned int ProductType; unsigned int FileType; unsigned char MajorVersion; unsigned char MinorVersion; unsigned int EncryptKey; unsigned int Reserved; } WPGHeader; typedef struct { unsigned char RecType; size_t RecordLength; } WPGRecord; typedef struct { unsigned char Class; unsigned char RecType; size_t Extension; size_t RecordLength; } WPG2Record; typedef struct { unsigned HorizontalUnits; unsigned VerticalUnits; unsigned char PosSizePrecision; } WPG2Start; typedef struct { unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType1; typedef struct { unsigned int Width; unsigned int Height; unsigned char Depth; unsigned char Compression; } WPG2BitmapType1; typedef struct { unsigned int RotAngle; unsigned int LowLeftX; unsigned int LowLeftY; unsigned int UpRightX; unsigned int UpRightY; unsigned int Width; unsigned int Height; unsigned int Depth; unsigned int HorzRes; unsigned int VertRes; } WPGBitmapType2; typedef struct { unsigned int StartIndex; unsigned int NumOfEntries; } WPGColorMapRec; /* typedef struct { size_t PS_unknown1; unsigned int PS_unknown2; unsigned int PS_unknown3; } WPGPSl1Record; */ Image *image; unsigned int status; WPGHeader Header; WPGRecord Rec; WPG2Record Rec2; WPG2Start StartWPG; WPGBitmapType1 BitmapHeader1; WPG2BitmapType1 Bitmap2Header1; WPGBitmapType2 BitmapHeader2; WPGColorMapRec WPG_Palette; int i, bpp, WPG2Flags; ssize_t ldblk; size_t one; unsigned char *BImgBuff; tCTM CTM; /*current transform matrix*/ /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); one=1; image=AcquireImage(image_info); image->depth=8; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read WPG image. 
*/ Header.FileId=ReadBlobLSBLong(image); Header.DataOffset=(MagickOffsetType) ReadBlobLSBLong(image); Header.ProductType=ReadBlobLSBShort(image); Header.FileType=ReadBlobLSBShort(image); Header.MajorVersion=ReadBlobByte(image); Header.MinorVersion=ReadBlobByte(image); Header.EncryptKey=ReadBlobLSBShort(image); Header.Reserved=ReadBlobLSBShort(image); if (Header.FileId!=0x435057FF || (Header.ProductType>>8)!=0x16) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (Header.EncryptKey!=0) ThrowReaderException(CoderError,"EncryptedWPGImageFileNotSupported"); image->columns = 1; image->rows = 1; image->colors = 0; bpp=0; BitmapHeader2.RotAngle=0; switch(Header.FileType) { case 1: /* WPG level 1 */ while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec.RecordLength); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec.RecordLength; switch(Rec.RecType) { case 0x0B: /* bitmap type 1 */ BitmapHeader1.Width=ReadBlobLSBShort(image); BitmapHeader1.Height=ReadBlobLSBShort(image); if ((BitmapHeader1.Width == 0) || (BitmapHeader1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader1.Depth=ReadBlobLSBShort(image); BitmapHeader1.HorzRes=ReadBlobLSBShort(image); BitmapHeader1.VertRes=ReadBlobLSBShort(image); if(BitmapHeader1.HorzRes && BitmapHeader1.VertRes) { image->units=PixelsPerCentimeterResolution; image->x_resolution=BitmapHeader1.HorzRes/470.0; image->y_resolution=BitmapHeader1.VertRes/470.0; } image->columns=BitmapHeader1.Width; image->rows=BitmapHeader1.Height; bpp=BitmapHeader1.Depth; goto UnpackRaster; case 0x0E: /*Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); image->colors=WPG_Palette.NumOfEntries; if (!AcquireImageColormap(image,image->colors)) goto NoMemory; for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); } break; case 0x11: /* Start PS l1 */ if(Rec.RecordLength > 8) image=ExtractPostscript(image,image_info, TellBlob(image)+8, /* skip PS header in the wpg */ (ssize_t) Rec.RecordLength-8,exception); break; case 0x14: /* bitmap type 2 */ BitmapHeader2.RotAngle=ReadBlobLSBShort(image); BitmapHeader2.LowLeftX=ReadBlobLSBShort(image); BitmapHeader2.LowLeftY=ReadBlobLSBShort(image); BitmapHeader2.UpRightX=ReadBlobLSBShort(image); BitmapHeader2.UpRightY=ReadBlobLSBShort(image); BitmapHeader2.Width=ReadBlobLSBShort(image); BitmapHeader2.Height=ReadBlobLSBShort(image); if ((BitmapHeader2.Width == 0) || (BitmapHeader2.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); BitmapHeader2.Depth=ReadBlobLSBShort(image); BitmapHeader2.HorzRes=ReadBlobLSBShort(image); BitmapHeader2.VertRes=ReadBlobLSBShort(image); image->units=PixelsPerCentimeterResolution; image->page.width=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightX)/470.0); image->page.height=(unsigned int) ((BitmapHeader2.LowLeftX-BitmapHeader2.UpRightY)/470.0); image->page.x=(int) (BitmapHeader2.LowLeftX/470.0); image->page.y=(int) (BitmapHeader2.LowLeftX/470.0); if(BitmapHeader2.HorzRes && BitmapHeader2.VertRes) { image->x_resolution=BitmapHeader2.HorzRes/470.0; 
image->y_resolution=BitmapHeader2.VertRes/470.0; } image->columns=BitmapHeader2.Width; image->rows=BitmapHeader2.Height; bpp=BitmapHeader2.Depth; UnpackRaster: if ((image->colors == 0) && (bpp != 24)) { image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors)) { NoMemory: ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } /* printf("Load default colormap \n"); */ for (i=0; (i < (int) image->colors) && (i < 256); i++) { image->colormap[i].red=ScaleCharToQuantum(WPG1_Palette[i].Red); image->colormap[i].green=ScaleCharToQuantum(WPG1_Palette[i].Green); image->colormap[i].blue=ScaleCharToQuantum(WPG1_Palette[i].Blue); } } else { if (bpp < 24) if ( (image->colors < (one << bpp)) && (bpp != 24) ) image->colormap=(PixelPacket *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } if (bpp == 1) { if(image->colormap[0].red==0 && image->colormap[0].green==0 && image->colormap[0].blue==0 && image->colormap[1].red==0 && image->colormap[1].green==0 && image->colormap[1].blue==0) { /* fix crippled monochrome palette */ image->colormap[1].red = image->colormap[1].green = image->colormap[1].blue = QuantumRange; } } if(UnpackWPGRaster(image,bpp) < 0) /* The raster cannot be unpacked */ { DecompressionFailed: ThrowReaderException(CoderError,"UnableToDecompressImage"); } if(Rec.RecType==0x14 && BitmapHeader2.RotAngle!=0 && !image_info->ping) { /* flop command */ if(BitmapHeader2.RotAngle & 0x8000) { Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); (void) RemoveLastImageFromList(&image); AppendImageToList(&image,flop_image); } } /* flip command */ if(BitmapHeader2.RotAngle & 0x2000) { Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); (void) RemoveLastImageFromList(&image); AppendImageToList(&image,flip_image); } } /* rotate command */ if(BitmapHeader2.RotAngle & 0x0FFF) { Image *rotate_image; rotate_image=RotateImage(image,(BitmapHeader2.RotAngle & 0x0FFF), exception); if (rotate_image != (Image *) NULL) { DuplicateBlob(rotate_image,image); (void) RemoveLastImageFromList(&image); AppendImageToList(&image,rotate_image); } } } /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=0; image->colors=0; break; case 0x1B: /* Postscript l2 */ if(Rec.RecordLength>0x3C) image=ExtractPostscript(image,image_info, TellBlob(image)+0x3C, /* skip PS l2 header in the wpg */ (ssize_t) Rec.RecordLength-0x3C,exception); break; } } break; case 2: /* WPG level 2 */ (void) memset(CTM,0,sizeof(CTM)); StartWPG.PosSizePrecision = 0; while(!EOFBlob(image)) /* object parser loop */ { (void) SeekBlob(image,Header.DataOffset,SEEK_SET); if(EOFBlob(image)) break; Rec2.Class=(i=ReadBlobByte(image)); if(i==EOF) break; Rec2.RecType=(i=ReadBlobByte(image)); if(i==EOF) break; Rd_WP_DWORD(image,&Rec2.Extension); Rd_WP_DWORD(image,&Rec2.RecordLength); if(EOFBlob(image)) break; Header.DataOffset=TellBlob(image)+Rec2.RecordLength; switch(Rec2.RecType) { case 1: StartWPG.HorizontalUnits=ReadBlobLSBShort(image); StartWPG.VerticalUnits=ReadBlobLSBShort(image); StartWPG.PosSizePrecision=ReadBlobByte(image); break; case 0x0C: /* Color palette */ WPG_Palette.StartIndex=ReadBlobLSBShort(image); WPG_Palette.NumOfEntries=ReadBlobLSBShort(image); image->colors=WPG_Palette.NumOfEntries; if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); for (i=WPG_Palette.StartIndex; i < (int)WPG_Palette.NumOfEntries; i++) { image->colormap[i].red=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].green=ScaleCharToQuantum((char) ReadBlobByte(image)); image->colormap[i].blue=ScaleCharToQuantum((char) ReadBlobByte(image)); (void) ReadBlobByte(image); /*Opacity??*/ } break; case 0x0E: Bitmap2Header1.Width=ReadBlobLSBShort(image); Bitmap2Header1.Height=ReadBlobLSBShort(image); if ((Bitmap2Header1.Width == 0) || (Bitmap2Header1.Height == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); Bitmap2Header1.Depth=ReadBlobByte(image); Bitmap2Header1.Compression=ReadBlobByte(image); if(Bitmap2Header1.Compression > 1) continue; /*Unknown compression method */ switch(Bitmap2Header1.Depth) { case 1: bpp=1; break; case 2: bpp=2; break; case 3: bpp=4; break; case 4: bpp=8; break; case 8: bpp=24; break; default: continue; /*Ignore raster with unknown depth*/ } image->columns=Bitmap2Header1.Width; image->rows=Bitmap2Header1.Height; if ((image->colors == 0) && (bpp != 24)) { size_t one; one=1; image->colors=one << bpp; if (!AcquireImageColormap(image,image->colors)) goto NoMemory; } else { if(bpp < 24) if( image->colors<(one << bpp) && bpp!=24 ) image->colormap=(PixelPacket *) ResizeQuantumMemory( image->colormap,(size_t) (one << bpp), sizeof(*image->colormap)); } switch(Bitmap2Header1.Compression) { case 0: /*Uncompressed raster*/ { ldblk=(ssize_t) ((bpp*image->columns+7)/8); BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk,sizeof(*BImgBuff)); if (BImgBuff == (unsigned char *) NULL) goto NoMemory; for(i=0; i< (ssize_t) image->rows; i++) { (void) ReadBlob(image,ldblk,BImgBuff); InsertRow(BImgBuff,i,image,bpp); } if(BImgBuff) BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff);; break; } case 1: /*RLE for WPG2 */ { if( UnpackWPG2Raster(image,bpp) < 0) goto DecompressionFailed; break; } } if(CTM[0][0]<0 && !image_info->ping) { /*?? 
RotAngle=360-RotAngle;*/ Image *flop_image; flop_image = FlopImage(image, exception); if (flop_image != (Image *) NULL) { DuplicateBlob(flop_image,image); (void) RemoveLastImageFromList(&image); AppendImageToList(&image,flop_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. Tx(0,0)=-1; Tx(1,0)=0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=1; Tx(2,1)=0; Tx(0,2)=(WPG._2Rect.X_ur+WPG._2Rect.X_ll); Tx(1,2)=0; Tx(2,2)=1; */ } if(CTM[1][1]<0 && !image_info->ping) { /*?? RotAngle=360-RotAngle;*/ Image *flip_image; flip_image = FlipImage(image, exception); if (flip_image != (Image *) NULL) { DuplicateBlob(flip_image,image); (void) RemoveLastImageFromList(&image); AppendImageToList(&image,flip_image); } /* Try to change CTM according to Flip - I am not sure, must be checked. float_matrix Tx(3,3); Tx(0,0)= 1; Tx(1,0)= 0; Tx(2,0)=0; Tx(0,1)= 0; Tx(1,1)=-1; Tx(2,1)=0; Tx(0,2)= 0; Tx(1,2)=(WPG._2Rect.Y_ur+WPG._2Rect.Y_ll); Tx(2,2)=1; */ } /* Allocate next image structure. */ AcquireNextImage(image_info,image); image->depth=8; if (image->next == (Image *) NULL) goto Finish; image=SyncNextImageInList(image); image->columns=image->rows=1; image->colors=0; break; case 0x12: /* Postscript WPG2*/ i=ReadBlobLSBShort(image); if(Rec2.RecordLength > (unsigned int) i) image=ExtractPostscript(image,image_info, TellBlob(image)+i, /*skip PS header in the wpg2*/ (ssize_t) (Rec2.RecordLength-i-2),exception); break; case 0x1B: /*bitmap rectangle*/ WPG2Flags = LoadWPG2Flags(image,StartWPG.PosSizePrecision,NULL,&CTM); (void) WPG2Flags; break; } } break; default: { ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); } } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } Finish: (void) CloseBlob(image); { Image *p; ssize_t scene=0; /* Rewind list, removing any empty images while rewinding. */ p=image; image=NULL; while (p != (Image *) NULL) { Image *tmp=p; if ((p->rows == 0) || (p->columns == 0)) { p=p->previous; DeleteImageFromList(&tmp); } else { image=p; p=p->previous; } } /* Fix scene numbers. */ for (p=image; p != (Image *) NULL; p=p->next) p->scene=(size_t) scene++; } if (image == (Image *) NULL) ThrowReaderException(CorruptImageError, "ImageFileDoesNotContainAnyImageData"); return(image); }
168,622
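The functional change in this pair is easy to miss inside the large reader: the fixed version calls SetImageExtent(image, image->columns, image->rows) and destroys the image list before the Finish label if that fails, so header-supplied dimensions are validated before they drive pixel buffers. A minimal sketch of that kind of extent check follows; SetExtentChecked is a hypothetical helper written for illustration, not the ImageMagick API.

#include <cstddef>
#include <cstdint>

// Hypothetical dimension guard: reject untrusted width/height before any
// buffer is sized from them, so later per-row writes cannot overflow.
static bool SetExtentChecked(std::size_t columns, std::size_t rows,
                             std::size_t* pixel_count) {
    if (columns == 0 || rows == 0)
        return false;                      // corrupt header
    if (columns > SIZE_MAX / rows)
        return false;                      // columns * rows would overflow size_t
    *pixel_count = columns * rows;         // safe to use for allocation
    return true;
}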
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code:
static bool getCoverageFormat4(vector<uint32_t>& coverage, const uint8_t* data, size_t size) {
    const size_t kSegCountOffset = 6;
    const size_t kEndCountOffset = 14;
    const size_t kHeaderSize = 16;
    const size_t kSegmentSize = 8;  // total size of array elements for one segment
    if (kEndCountOffset > size) {
        return false;
    }
    size_t segCount = readU16(data, kSegCountOffset) >> 1;
    if (kHeaderSize + segCount * kSegmentSize > size) {
        return false;
    }
    for (size_t i = 0; i < segCount; i++) {
        uint32_t end = readU16(data, kEndCountOffset + 2 * i);
        uint32_t start = readU16(data, kHeaderSize + 2 * (segCount + i));
        if (end < start) {
            return false;
        }
        uint32_t rangeOffset = readU16(data, kHeaderSize + 2 * (3 * segCount + i));
        if (rangeOffset == 0) {
            uint32_t delta = readU16(data, kHeaderSize + 2 * (2 * segCount + i));
            if (((end + delta) & 0xffff) > end - start) {
                addRange(coverage, start, end + 1);
            } else {
                for (uint32_t j = start; j < end + 1; j++) {
                    if (((j + delta) & 0xffff) != 0) {
                        addRange(coverage, j, j + 1);
                    }
                }
            }
        } else {
            for (uint32_t j = start; j < end + 1; j++) {
                uint32_t actualRangeOffset = kHeaderSize + 6 * segCount + rangeOffset + (i + j - start) * 2;
                if (actualRangeOffset + 2 > size) {
                    continue;
                }
                uint32_t glyphId = readU16(data, actualRangeOffset);
                if (glyphId != 0) {
                    addRange(coverage, j, j + 1);
                }
            }
        }
    }
    return true;
}

Commit Message: Add error logging on invalid cmap - DO NOT MERGE

This patch logs instances of fonts with invalid cmap tables.

Bug: 25645298
Bug: 26413177
Change-Id: I183985e9784a97a2b4307a22e036382b1fc90e5e
CWE ID: CWE-20
static bool getCoverageFormat4(vector<uint32_t>& coverage, const uint8_t* data, size_t size) {
    const size_t kSegCountOffset = 6;
    const size_t kEndCountOffset = 14;
    const size_t kHeaderSize = 16;
    const size_t kSegmentSize = 8;  // total size of array elements for one segment
    if (kEndCountOffset > size) {
        return false;
    }
    size_t segCount = readU16(data, kSegCountOffset) >> 1;
    if (kHeaderSize + segCount * kSegmentSize > size) {
        return false;
    }
    for (size_t i = 0; i < segCount; i++) {
        uint32_t end = readU16(data, kEndCountOffset + 2 * i);
        uint32_t start = readU16(data, kHeaderSize + 2 * (segCount + i));
        if (end < start) {
            android_errorWriteLog(0x534e4554, "26413177");
            return false;
        }
        uint32_t rangeOffset = readU16(data, kHeaderSize + 2 * (3 * segCount + i));
        if (rangeOffset == 0) {
            uint32_t delta = readU16(data, kHeaderSize + 2 * (2 * segCount + i));
            if (((end + delta) & 0xffff) > end - start) {
                addRange(coverage, start, end + 1);
            } else {
                for (uint32_t j = start; j < end + 1; j++) {
                    if (((j + delta) & 0xffff) != 0) {
                        addRange(coverage, j, j + 1);
                    }
                }
            }
        } else {
            for (uint32_t j = start; j < end + 1; j++) {
                uint32_t actualRangeOffset = kHeaderSize + 6 * segCount + rangeOffset + (i + j - start) * 2;
                if (actualRangeOffset + 2 > size) {
                    continue;
                }
                uint32_t glyphId = readU16(data, actualRangeOffset);
                if (glyphId != 0) {
                    addRange(coverage, j, j + 1);
                }
            }
        }
    }
    return true;
}
173,896
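Worth noting for this row: the bounds check itself (end < start rejects the table) already existed in the vulnerable version; the patch only adds an android_errorWriteLog call so malformed cmap tables become visible in telemetry before the font is rejected. The fragment below sketches that log-then-reject shape with a generic logging helper; report_invalid_input is a placeholder for illustration, not the Android API.

#include <cstdio>

// Placeholder for platform error reporting (the real fix uses android_errorWriteLog).
static void report_invalid_input(const char* what) {
    std::fprintf(stderr, "invalid input rejected: %s\n", what);
}

// Log-then-reject: record why untrusted data was refused, then fail closed.
static bool check_segment(unsigned start, unsigned end) {
    if (end < start) {
        report_invalid_input("cmap format 4 segment with end < start");
        return false;
    }
    return true;
}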
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static Image *ReadBMPImage(const ImageInfo *image_info,ExceptionInfo *exception) { BMPInfo bmp_info; Image *image; IndexPacket index; MagickBooleanType status; MagickOffsetType offset, start_position; MemoryInfo *pixel_info; register IndexPacket *indexes; register PixelPacket *q; register ssize_t i, x; register unsigned char *p; size_t bit, blue, bytes_per_line, green, length, red; ssize_t count, y; unsigned char magick[12], *pixels; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Determine if this a BMP file. */ (void) ResetMagickMemory(&bmp_info,0,sizeof(bmp_info)); bmp_info.ba_offset=0; start_position=0; count=ReadBlob(image,2,magick); do { LongPixelPacket shift; PixelPacket quantum_bits; size_t profile_data, profile_size; /* Verify BMP identifier. */ if (bmp_info.ba_offset == 0) start_position=TellBlob(image)-2; bmp_info.ba_offset=0; while (LocaleNCompare((char *) magick,"BA",2) == 0) { bmp_info.file_size=ReadBlobLSBLong(image); bmp_info.ba_offset=ReadBlobLSBLong(image); bmp_info.offset_bits=ReadBlobLSBLong(image); count=ReadBlob(image,2,magick); if (count != 2) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," Magick: %c%c", magick[0],magick[1]); if ((count == 0) || ((LocaleNCompare((char *) magick,"BM",2) != 0) && (LocaleNCompare((char *) magick,"CI",2) != 0))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); bmp_info.file_size=ReadBlobLSBLong(image); (void) ReadBlobLSBLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " File_size in header: %u bytes",bmp_info.file_size); bmp_info.offset_bits=ReadBlobLSBLong(image); bmp_info.size=ReadBlobLSBLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," BMP size: %u", bmp_info.size); if (bmp_info.size == 12) { /* OS/2 BMP image file. */ (void) CopyMagickString(image->magick,"BMP2",MaxTextExtent); bmp_info.width=(ssize_t) ((short) ReadBlobLSBShort(image)); bmp_info.height=(ssize_t) ((short) ReadBlobLSBShort(image)); bmp_info.planes=ReadBlobLSBShort(image); bmp_info.bits_per_pixel=ReadBlobLSBShort(image); bmp_info.x_pixels=0; bmp_info.y_pixels=0; bmp_info.number_colors=0; bmp_info.compression=BI_RGB; bmp_info.image_size=0; bmp_info.alpha_mask=0; if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Format: OS/2 Bitmap"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Geometry: %.20gx%.20g",(double) bmp_info.width,(double) bmp_info.height); } } else { /* Microsoft Windows BMP image file. 
*/ if (bmp_info.size < 40) ThrowReaderException(CorruptImageError,"NonOS2HeaderSizeError"); bmp_info.width=(ssize_t) ((int) ReadBlobLSBLong(image)); bmp_info.height=(ssize_t) ((int) ReadBlobLSBLong(image)); bmp_info.planes=ReadBlobLSBShort(image); bmp_info.bits_per_pixel=ReadBlobLSBShort(image); bmp_info.compression=ReadBlobLSBLong(image); bmp_info.image_size=ReadBlobLSBLong(image); bmp_info.x_pixels=ReadBlobLSBLong(image); bmp_info.y_pixels=ReadBlobLSBLong(image); bmp_info.number_colors=ReadBlobLSBLong(image); bmp_info.colors_important=ReadBlobLSBLong(image); profile_data=0; profile_size=0; if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Format: MS Windows bitmap"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Geometry: %.20gx%.20g",(double) bmp_info.width,(double) bmp_info.height); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Bits per pixel: %.20g",(double) bmp_info.bits_per_pixel); switch ((int) bmp_info.compression) { case BI_RGB: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_RGB"); break; } case BI_RLE4: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_RLE4"); break; } case BI_RLE8: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_RLE8"); break; } case BI_BITFIELDS: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_BITFIELDS"); break; } case BI_PNG: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_PNG"); break; } case BI_JPEG: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_JPEG"); break; } default: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: UNKNOWN (%u)",bmp_info.compression); } } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: %u",bmp_info.number_colors); } bmp_info.red_mask=ReadBlobLSBLong(image); bmp_info.green_mask=ReadBlobLSBLong(image); bmp_info.blue_mask=ReadBlobLSBLong(image); if (bmp_info.size > 40) { double sum; /* Read color management information. */ bmp_info.alpha_mask=ReadBlobLSBLong(image); bmp_info.colorspace=(int) ReadBlobLSBLong(image); /* Decode 2^30 fixed point formatted CIE primaries. 
*/ # define BMP_DENOM ((double) 0x40000000) bmp_info.red_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.red_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.red_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.green_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.green_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.green_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.blue_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.blue_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.blue_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM; sum=bmp_info.red_primary.x+bmp_info.red_primary.y+ bmp_info.red_primary.z; bmp_info.red_primary.x/=sum; bmp_info.red_primary.y/=sum; image->chromaticity.red_primary.x=bmp_info.red_primary.x; image->chromaticity.red_primary.y=bmp_info.red_primary.y; sum=bmp_info.green_primary.x+bmp_info.green_primary.y+ bmp_info.green_primary.z; bmp_info.green_primary.x/=sum; bmp_info.green_primary.y/=sum; image->chromaticity.green_primary.x=bmp_info.green_primary.x; image->chromaticity.green_primary.y=bmp_info.green_primary.y; sum=bmp_info.blue_primary.x+bmp_info.blue_primary.y+ bmp_info.blue_primary.z; bmp_info.blue_primary.x/=sum; bmp_info.blue_primary.y/=sum; image->chromaticity.blue_primary.x=bmp_info.blue_primary.x; image->chromaticity.blue_primary.y=bmp_info.blue_primary.y; /* Decode 16^16 fixed point formatted gamma_scales. */ bmp_info.gamma_scale.x=(double) ReadBlobLSBLong(image)/0x10000; bmp_info.gamma_scale.y=(double) ReadBlobLSBLong(image)/0x10000; bmp_info.gamma_scale.z=(double) ReadBlobLSBLong(image)/0x10000; /* Compute a single gamma from the BMP 3-channel gamma. */ image->gamma=(bmp_info.gamma_scale.x+bmp_info.gamma_scale.y+ bmp_info.gamma_scale.z)/3.0; } else (void) CopyMagickString(image->magick,"BMP3",MaxTextExtent); if (bmp_info.size > 108) { size_t intent; /* Read BMP Version 5 color management information. 
*/ intent=ReadBlobLSBLong(image); switch ((int) intent) { case LCS_GM_BUSINESS: { image->rendering_intent=SaturationIntent; break; } case LCS_GM_GRAPHICS: { image->rendering_intent=RelativeIntent; break; } case LCS_GM_IMAGES: { image->rendering_intent=PerceptualIntent; break; } case LCS_GM_ABS_COLORIMETRIC: { image->rendering_intent=AbsoluteIntent; break; } } profile_data=ReadBlobLSBLong(image); profile_size=ReadBlobLSBLong(image); (void) profile_data; (void) profile_size; (void) ReadBlobLSBLong(image); /* Reserved byte */ } } if ((MagickSizeType) bmp_info.file_size > GetBlobSize(image)) (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "LengthAndFilesizeDoNotMatch","`%s'",image->filename); else if ((MagickSizeType) bmp_info.file_size < GetBlobSize(image)) (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"LengthAndFilesizeDoNotMatch","`%s'", image->filename); if (bmp_info.width <= 0) ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize"); if (bmp_info.height == 0) ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize"); if (bmp_info.planes != 1) ThrowReaderException(CorruptImageError,"StaticPlanesValueNotEqualToOne"); if ((bmp_info.bits_per_pixel != 1) && (bmp_info.bits_per_pixel != 4) && (bmp_info.bits_per_pixel != 8) && (bmp_info.bits_per_pixel != 16) && (bmp_info.bits_per_pixel != 24) && (bmp_info.bits_per_pixel != 32)) ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel"); if (bmp_info.bits_per_pixel < 16 && bmp_info.number_colors > (1U << bmp_info.bits_per_pixel)) { ThrowReaderException(CorruptImageError, "UnrecognizedNumberOfColors"); } if ((bmp_info.compression == 1) && (bmp_info.bits_per_pixel != 8)) ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel"); if ((bmp_info.compression == 2) && (bmp_info.bits_per_pixel != 4)) ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel"); if ((bmp_info.compression == 3) && (bmp_info.bits_per_pixel < 16)) ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel"); switch (bmp_info.compression) { case BI_RGB: case BI_RLE8: case BI_RLE4: case BI_BITFIELDS: break; case BI_JPEG: ThrowReaderException(CoderError,"JPEGCompressNotSupported"); case BI_PNG: ThrowReaderException(CoderError,"PNGCompressNotSupported"); default: ThrowReaderException(CorruptImageError,"UnrecognizedImageCompression"); } image->columns=(size_t) MagickAbsoluteValue(bmp_info.width); image->rows=(size_t) MagickAbsoluteValue(bmp_info.height); image->depth=bmp_info.bits_per_pixel <= 8 ? bmp_info.bits_per_pixel : 8; image->matte=((bmp_info.alpha_mask != 0) && (bmp_info.compression == BI_BITFIELDS)) || (bmp_info.bits_per_pixel == 32) ? MagickTrue : MagickFalse; if (bmp_info.bits_per_pixel < 16) { size_t one; image->storage_class=PseudoClass; image->colors=bmp_info.number_colors; one=1; if (image->colors == 0) image->colors=one << bmp_info.bits_per_pixel; } if (image->storage_class == PseudoClass) { unsigned char *bmp_colormap; size_t packet_size; /* Read BMP raster colormap. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading colormap of %.20g colors",(double) image->colors); if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); bmp_colormap=(unsigned char *) AcquireQuantumMemory((size_t) image->colors,4*sizeof(*bmp_colormap)); if (bmp_colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if ((bmp_info.size == 12) || (bmp_info.size == 64)) packet_size=3; else packet_size=4; offset=SeekBlob(image,start_position+14+bmp_info.size,SEEK_SET); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); count=ReadBlob(image,packet_size*image->colors,bmp_colormap); if (count != (ssize_t) (packet_size*image->colors)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); p=bmp_colormap; for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].blue=ScaleCharToQuantum(*p++); image->colormap[i].green=ScaleCharToQuantum(*p++); image->colormap[i].red=ScaleCharToQuantum(*p++); if (packet_size == 4) p++; } bmp_colormap=(unsigned char *) RelinquishMagickMemory(bmp_colormap); } image->x_resolution=(double) bmp_info.x_pixels/100.0; image->y_resolution=(double) bmp_info.y_pixels/100.0; image->units=PixelsPerCentimeterResolution; if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; /* Read image data. */ offset=SeekBlob(image,start_position+bmp_info.offset_bits,SEEK_SET); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (bmp_info.compression == BI_RLE4) bmp_info.bits_per_pixel<<=1; bytes_per_line=4*((image->columns*bmp_info.bits_per_pixel+31)/32); length=(size_t) bytes_per_line*image->rows; pixel_info=AcquireVirtualMemory((size_t) image->rows, MagickMax(bytes_per_line,image->columns+256UL)*sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); if ((bmp_info.compression == BI_RGB) || (bmp_info.compression == BI_BITFIELDS)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading pixels (%.20g bytes)",(double) length); count=ReadBlob(image,length,pixels); if (count != (ssize_t) length) { pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); } } else { /* Convert run-length encoded raster pixels. */ status=DecodeImage(image,bmp_info.compression,pixels); if (status == MagickFalse) { pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError, "UnableToRunlengthDecodeImage"); } } /* Convert BMP raster image to pixel packets. */ if (bmp_info.compression == BI_RGB) { bmp_info.alpha_mask=image->matte != MagickFalse ? 0xff000000U : 0U; bmp_info.red_mask=0x00ff0000U; bmp_info.green_mask=0x0000ff00U; bmp_info.blue_mask=0x000000ffU; if (bmp_info.bits_per_pixel == 16) { /* RGB555. */ bmp_info.red_mask=0x00007c00U; bmp_info.green_mask=0x000003e0U; bmp_info.blue_mask=0x0000001fU; } } if ((bmp_info.bits_per_pixel == 16) || (bmp_info.bits_per_pixel == 32)) { register size_t sample; /* Get shift and quantum bits info from bitfield masks. 
*/ (void) ResetMagickMemory(&shift,0,sizeof(shift)); (void) ResetMagickMemory(&quantum_bits,0,sizeof(quantum_bits)); if (bmp_info.red_mask != 0) while (((bmp_info.red_mask << shift.red) & 0x80000000UL) == 0) shift.red++; if (bmp_info.green_mask != 0) while (((bmp_info.green_mask << shift.green) & 0x80000000UL) == 0) shift.green++; if (bmp_info.blue_mask != 0) while (((bmp_info.blue_mask << shift.blue) & 0x80000000UL) == 0) shift.blue++; if (bmp_info.alpha_mask != 0) while (((bmp_info.alpha_mask << shift.opacity) & 0x80000000UL) == 0) shift.opacity++; sample=shift.red; while (((bmp_info.red_mask << sample) & 0x80000000UL) != 0) sample++; quantum_bits.red=ClampToQuantum((MagickRealType) sample-shift.red); sample=shift.green; while (((bmp_info.green_mask << sample) & 0x80000000UL) != 0) sample++; quantum_bits.green=ClampToQuantum((MagickRealType) sample-shift.green); sample=shift.blue; while (((bmp_info.blue_mask << sample) & 0x80000000UL) != 0) sample++; quantum_bits.blue=ClampToQuantum((MagickRealType) sample-shift.blue); sample=shift.opacity; while (((bmp_info.alpha_mask << sample) & 0x80000000UL) != 0) sample++; quantum_bits.opacity=ClampToQuantum((MagickRealType) sample- shift.opacity); } switch (bmp_info.bits_per_pixel) { case 1: { /* Convert bitmap scanline. */ for (y=(ssize_t) image->rows-1; y >= 0; y--) { p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < ((ssize_t) image->columns-7); x+=8) { for (bit=0; bit < 8; bit++) { index=(IndexPacket) (((*p) & (0x80 >> bit)) != 0 ? 0x01 : 0x00); SetPixelIndex(indexes+x+bit,index); q++; } p++; } if ((image->columns % 8) != 0) { for (bit=0; bit < (image->columns % 8); bit++) { index=(IndexPacket) (((*p) & (0x80 >> bit)) != 0 ? 0x01 : 0x00); SetPixelIndex(indexes+x+bit,index); } p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } (void) SyncImage(image); break; } case 4: { /* Convert PseudoColor scanline. */ for (y=(ssize_t) image->rows-1; y >= 0; y--) { p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < ((ssize_t) image->columns-1); x+=2) { index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f); SetPixelIndex(indexes+x,index); index=ConstrainColormapIndex(image,*p & 0x0f); SetPixelIndex(indexes+x+1,index); p++; } if ((image->columns % 2) != 0) { index=ConstrainColormapIndex(image,(*p >> 4) & 0xf); SetPixelIndex(indexes+x,index); p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } (void) SyncImage(image); break; } case 8: { /* Convert PseudoColor scanline. 
*/ if ((bmp_info.compression == BI_RLE8) || (bmp_info.compression == BI_RLE4)) bytes_per_line=image->columns; for (y=(ssize_t) image->rows-1; y >= 0; y--) { p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=(ssize_t) image->columns; x != 0; --x) { index=ConstrainColormapIndex(image,*p); SetPixelIndex(indexes,index); indexes++; p++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; offset=(MagickOffsetType) (image->rows-y-1); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } (void) SyncImage(image); break; } case 16: { size_t alpha, pixel; /* Convert bitfield encoded 16-bit PseudoColor scanline. */ if (bmp_info.compression != BI_RGB && bmp_info.compression != BI_BITFIELDS) { pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError, "UnrecognizedImageCompression"); } bytes_per_line=2*(image->columns+image->columns % 2); image->storage_class=DirectClass; for (y=(ssize_t) image->rows-1; y >= 0; y--) { p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { pixel=(size_t) (*p++); pixel|=(*p++) << 8; red=((pixel & bmp_info.red_mask) << shift.red) >> 16; if (quantum_bits.red == 5) red|=((red & 0xe000) >> 5); if (quantum_bits.red <= 8) red|=((red & 0xff00) >> 8); green=((pixel & bmp_info.green_mask) << shift.green) >> 16; if (quantum_bits.green == 5) green|=((green & 0xe000) >> 5); if (quantum_bits.green == 6) green|=((green & 0xc000) >> 6); if (quantum_bits.green <= 8) green|=((green & 0xff00) >> 8); blue=((pixel & bmp_info.blue_mask) << shift.blue) >> 16; if (quantum_bits.blue == 5) blue|=((blue & 0xe000) >> 5); if (quantum_bits.blue <= 8) blue|=((blue & 0xff00) >> 8); alpha=((pixel & bmp_info.alpha_mask) << shift.opacity) >> 16; if (quantum_bits.opacity <= 8) alpha|=((alpha & 0xff00) >> 8); SetPixelRed(q,ScaleShortToQuantum((unsigned short) red)); SetPixelGreen(q,ScaleShortToQuantum((unsigned short) green)); SetPixelBlue(q,ScaleShortToQuantum((unsigned short) blue)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelAlpha(q,ScaleShortToQuantum((unsigned short) alpha)); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; offset=(MagickOffsetType) (image->rows-y-1); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } break; } case 24: { /* Convert DirectColor scanline. 
*/ bytes_per_line=4*((image->columns*24+31)/32); for (y=(ssize_t) image->rows-1; y >= 0; y--) { p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelBlue(q,ScaleCharToQuantum(*p++)); SetPixelGreen(q,ScaleCharToQuantum(*p++)); SetPixelRed(q,ScaleCharToQuantum(*p++)); SetPixelOpacity(q,OpaqueOpacity); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; offset=(MagickOffsetType) (image->rows-y-1); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } break; } case 32: { /* Convert bitfield encoded DirectColor scanline. */ if ((bmp_info.compression != BI_RGB) && (bmp_info.compression != BI_BITFIELDS)) { pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError, "UnrecognizedImageCompression"); } bytes_per_line=4*(image->columns); for (y=(ssize_t) image->rows-1; y >= 0; y--) { size_t alpha, pixel; p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { pixel=(size_t) (*p++); pixel|=((size_t) *p++ << 8); pixel|=((size_t) *p++ << 16); pixel|=((size_t) *p++ << 24); red=((pixel & bmp_info.red_mask) << shift.red) >> 16; if (quantum_bits.red == 8) red|=(red >> 8); green=((pixel & bmp_info.green_mask) << shift.green) >> 16; if (quantum_bits.green == 8) green|=(green >> 8); blue=((pixel & bmp_info.blue_mask) << shift.blue) >> 16; if (quantum_bits.blue == 8) blue|=(blue >> 8); alpha=((pixel & bmp_info.alpha_mask) << shift.opacity) >> 16; if (quantum_bits.opacity == 8) alpha|=(alpha >> 8); SetPixelRed(q,ScaleShortToQuantum((unsigned short) red)); SetPixelGreen(q,ScaleShortToQuantum((unsigned short) green)); SetPixelBlue(q,ScaleShortToQuantum((unsigned short) blue)); SetPixelAlpha(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelAlpha(q,ScaleShortToQuantum((unsigned short) alpha)); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; offset=(MagickOffsetType) (image->rows-y-1); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } break; } default: { pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } } pixel_info=RelinquishVirtualMemory(pixel_info); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } if (bmp_info.height < 0) { Image *flipped_image; /* Correct image orientation. */ flipped_image=FlipImage(image,exception); if (flipped_image != (Image *) NULL) { DuplicateBlob(flipped_image,image); image=DestroyImage(image); image=flipped_image; } } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; *magick='\0'; if (bmp_info.ba_offset != 0) { offset=SeekBlob(image,(MagickOffsetType) bmp_info.ba_offset,SEEK_SET); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } count=ReadBlob(image,2,magick); if ((count == 2) && (IsBMP(magick,2) != MagickFalse)) { /* Acquire next image structure. 
*/ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while (IsBMP(magick,2) != MagickFalse); (void) CloseBlob(image); return(GetFirstImageInList(image)); } Commit Message: CWE ID: CWE-119
static Image *ReadBMPImage(const ImageInfo *image_info,ExceptionInfo *exception) { BMPInfo bmp_info; Image *image; IndexPacket index; MagickBooleanType status; MagickOffsetType offset, start_position; MemoryInfo *pixel_info; register IndexPacket *indexes; register PixelPacket *q; register ssize_t i, x; register unsigned char *p; size_t bit, blue, bytes_per_line, green, length, red; ssize_t count, y; unsigned char magick[12], *pixels; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Determine if this a BMP file. */ (void) ResetMagickMemory(&bmp_info,0,sizeof(bmp_info)); bmp_info.ba_offset=0; start_position=0; count=ReadBlob(image,2,magick); do { LongPixelPacket shift; PixelPacket quantum_bits; size_t profile_data, profile_size; /* Verify BMP identifier. */ if (bmp_info.ba_offset == 0) start_position=TellBlob(image)-2; bmp_info.ba_offset=0; while (LocaleNCompare((char *) magick,"BA",2) == 0) { bmp_info.file_size=ReadBlobLSBLong(image); bmp_info.ba_offset=ReadBlobLSBLong(image); bmp_info.offset_bits=ReadBlobLSBLong(image); count=ReadBlob(image,2,magick); if (count != 2) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," Magick: %c%c", magick[0],magick[1]); if ((count == 0) || ((LocaleNCompare((char *) magick,"BM",2) != 0) && (LocaleNCompare((char *) magick,"CI",2) != 0))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); bmp_info.file_size=ReadBlobLSBLong(image); (void) ReadBlobLSBLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " File_size in header: %u bytes",bmp_info.file_size); bmp_info.offset_bits=ReadBlobLSBLong(image); bmp_info.size=ReadBlobLSBLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," BMP size: %u", bmp_info.size); if (bmp_info.size == 12) { /* OS/2 BMP image file. */ (void) CopyMagickString(image->magick,"BMP2",MaxTextExtent); bmp_info.width=(ssize_t) ((short) ReadBlobLSBShort(image)); bmp_info.height=(ssize_t) ((short) ReadBlobLSBShort(image)); bmp_info.planes=ReadBlobLSBShort(image); bmp_info.bits_per_pixel=ReadBlobLSBShort(image); bmp_info.x_pixels=0; bmp_info.y_pixels=0; bmp_info.number_colors=0; bmp_info.compression=BI_RGB; bmp_info.image_size=0; bmp_info.alpha_mask=0; if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Format: OS/2 Bitmap"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Geometry: %.20gx%.20g",(double) bmp_info.width,(double) bmp_info.height); } } else { /* Microsoft Windows BMP image file. 
*/ if (bmp_info.size < 40) ThrowReaderException(CorruptImageError,"NonOS2HeaderSizeError"); bmp_info.width=(ssize_t) ((int) ReadBlobLSBLong(image)); bmp_info.height=(ssize_t) ((int) ReadBlobLSBLong(image)); bmp_info.planes=ReadBlobLSBShort(image); bmp_info.bits_per_pixel=ReadBlobLSBShort(image); bmp_info.compression=ReadBlobLSBLong(image); bmp_info.image_size=ReadBlobLSBLong(image); bmp_info.x_pixels=ReadBlobLSBLong(image); bmp_info.y_pixels=ReadBlobLSBLong(image); bmp_info.number_colors=ReadBlobLSBLong(image); bmp_info.colors_important=ReadBlobLSBLong(image); profile_data=0; profile_size=0; if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Format: MS Windows bitmap"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Geometry: %.20gx%.20g",(double) bmp_info.width,(double) bmp_info.height); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Bits per pixel: %.20g",(double) bmp_info.bits_per_pixel); switch ((int) bmp_info.compression) { case BI_RGB: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_RGB"); break; } case BI_RLE4: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_RLE4"); break; } case BI_RLE8: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_RLE8"); break; } case BI_BITFIELDS: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_BITFIELDS"); break; } case BI_PNG: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_PNG"); break; } case BI_JPEG: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: BI_JPEG"); break; } default: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression: UNKNOWN (%u)",bmp_info.compression); } } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: %u",bmp_info.number_colors); } bmp_info.red_mask=ReadBlobLSBLong(image); bmp_info.green_mask=ReadBlobLSBLong(image); bmp_info.blue_mask=ReadBlobLSBLong(image); if (bmp_info.size > 40) { double sum; /* Read color management information. */ bmp_info.alpha_mask=ReadBlobLSBLong(image); bmp_info.colorspace=(int) ReadBlobLSBLong(image); /* Decode 2^30 fixed point formatted CIE primaries. 
*/ # define BMP_DENOM ((double) 0x40000000) bmp_info.red_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.red_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.red_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.green_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.green_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.green_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.blue_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.blue_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM; bmp_info.blue_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM; sum=bmp_info.red_primary.x+bmp_info.red_primary.y+ bmp_info.red_primary.z; bmp_info.red_primary.x/=sum; bmp_info.red_primary.y/=sum; image->chromaticity.red_primary.x=bmp_info.red_primary.x; image->chromaticity.red_primary.y=bmp_info.red_primary.y; sum=bmp_info.green_primary.x+bmp_info.green_primary.y+ bmp_info.green_primary.z; bmp_info.green_primary.x/=sum; bmp_info.green_primary.y/=sum; image->chromaticity.green_primary.x=bmp_info.green_primary.x; image->chromaticity.green_primary.y=bmp_info.green_primary.y; sum=bmp_info.blue_primary.x+bmp_info.blue_primary.y+ bmp_info.blue_primary.z; bmp_info.blue_primary.x/=sum; bmp_info.blue_primary.y/=sum; image->chromaticity.blue_primary.x=bmp_info.blue_primary.x; image->chromaticity.blue_primary.y=bmp_info.blue_primary.y; /* Decode 16^16 fixed point formatted gamma_scales. */ bmp_info.gamma_scale.x=(double) ReadBlobLSBLong(image)/0x10000; bmp_info.gamma_scale.y=(double) ReadBlobLSBLong(image)/0x10000; bmp_info.gamma_scale.z=(double) ReadBlobLSBLong(image)/0x10000; /* Compute a single gamma from the BMP 3-channel gamma. */ image->gamma=(bmp_info.gamma_scale.x+bmp_info.gamma_scale.y+ bmp_info.gamma_scale.z)/3.0; } else (void) CopyMagickString(image->magick,"BMP3",MaxTextExtent); if (bmp_info.size > 108) { size_t intent; /* Read BMP Version 5 color management information. 
*/ intent=ReadBlobLSBLong(image); switch ((int) intent) { case LCS_GM_BUSINESS: { image->rendering_intent=SaturationIntent; break; } case LCS_GM_GRAPHICS: { image->rendering_intent=RelativeIntent; break; } case LCS_GM_IMAGES: { image->rendering_intent=PerceptualIntent; break; } case LCS_GM_ABS_COLORIMETRIC: { image->rendering_intent=AbsoluteIntent; break; } } profile_data=ReadBlobLSBLong(image); profile_size=ReadBlobLSBLong(image); (void) profile_data; (void) profile_size; (void) ReadBlobLSBLong(image); /* Reserved byte */ } } if ((MagickSizeType) bmp_info.file_size > GetBlobSize(image)) (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "LengthAndFilesizeDoNotMatch","`%s'",image->filename); else if ((MagickSizeType) bmp_info.file_size < GetBlobSize(image)) (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"LengthAndFilesizeDoNotMatch","`%s'", image->filename); if (bmp_info.width <= 0) ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize"); if (bmp_info.height == 0) ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize"); if (bmp_info.planes != 1) ThrowReaderException(CorruptImageError,"StaticPlanesValueNotEqualToOne"); if ((bmp_info.bits_per_pixel != 1) && (bmp_info.bits_per_pixel != 4) && (bmp_info.bits_per_pixel != 8) && (bmp_info.bits_per_pixel != 16) && (bmp_info.bits_per_pixel != 24) && (bmp_info.bits_per_pixel != 32)) ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel"); if (bmp_info.bits_per_pixel < 16 && bmp_info.number_colors > (1U << bmp_info.bits_per_pixel)) { ThrowReaderException(CorruptImageError, "UnrecognizedNumberOfColors"); } if ((bmp_info.compression == 1) && (bmp_info.bits_per_pixel != 8)) ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel"); if ((bmp_info.compression == 2) && (bmp_info.bits_per_pixel != 4)) ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel"); if ((bmp_info.compression == 3) && (bmp_info.bits_per_pixel < 16)) ThrowReaderException(CorruptImageError,"UnrecognizedBitsPerPixel"); switch (bmp_info.compression) { case BI_RGB: case BI_RLE8: case BI_RLE4: case BI_BITFIELDS: break; case BI_JPEG: ThrowReaderException(CoderError,"JPEGCompressNotSupported"); case BI_PNG: ThrowReaderException(CoderError,"PNGCompressNotSupported"); default: ThrowReaderException(CorruptImageError,"UnrecognizedImageCompression"); } image->columns=(size_t) MagickAbsoluteValue(bmp_info.width); image->rows=(size_t) MagickAbsoluteValue(bmp_info.height); image->depth=bmp_info.bits_per_pixel <= 8 ? bmp_info.bits_per_pixel : 8; image->matte=((bmp_info.alpha_mask != 0) && (bmp_info.compression == BI_BITFIELDS)) || (bmp_info.bits_per_pixel == 32) ? MagickTrue : MagickFalse; if (bmp_info.bits_per_pixel < 16) { size_t one; image->storage_class=PseudoClass; image->colors=bmp_info.number_colors; one=1; if (image->colors == 0) image->colors=one << bmp_info.bits_per_pixel; } if (image->storage_class == PseudoClass) { unsigned char *bmp_colormap; size_t packet_size; /* Read BMP raster colormap. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading colormap of %.20g colors",(double) image->colors); if (AcquireImageColormap(image,image->colors) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); bmp_colormap=(unsigned char *) AcquireQuantumMemory((size_t) image->colors,4*sizeof(*bmp_colormap)); if (bmp_colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if ((bmp_info.size == 12) || (bmp_info.size == 64)) packet_size=3; else packet_size=4; offset=SeekBlob(image,start_position+14+bmp_info.size,SEEK_SET); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); count=ReadBlob(image,packet_size*image->colors,bmp_colormap); if (count != (ssize_t) (packet_size*image->colors)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); p=bmp_colormap; for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].blue=ScaleCharToQuantum(*p++); image->colormap[i].green=ScaleCharToQuantum(*p++); image->colormap[i].red=ScaleCharToQuantum(*p++); if (packet_size == 4) p++; } bmp_colormap=(unsigned char *) RelinquishMagickMemory(bmp_colormap); } image->x_resolution=(double) bmp_info.x_pixels/100.0; image->y_resolution=(double) bmp_info.y_pixels/100.0; image->units=PixelsPerCentimeterResolution; if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Read image data. */ offset=SeekBlob(image,start_position+bmp_info.offset_bits,SEEK_SET); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (bmp_info.compression == BI_RLE4) bmp_info.bits_per_pixel<<=1; bytes_per_line=4*((image->columns*bmp_info.bits_per_pixel+31)/32); length=(size_t) bytes_per_line*image->rows; pixel_info=AcquireVirtualMemory((size_t) image->rows, MagickMax(bytes_per_line,image->columns+256UL)*sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); if ((bmp_info.compression == BI_RGB) || (bmp_info.compression == BI_BITFIELDS)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading pixels (%.20g bytes)",(double) length); count=ReadBlob(image,length,pixels); if (count != (ssize_t) length) { pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); } } else { /* Convert run-length encoded raster pixels. */ status=DecodeImage(image,bmp_info.compression,pixels); if (status == MagickFalse) { pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError, "UnableToRunlengthDecodeImage"); } } /* Convert BMP raster image to pixel packets. */ if (bmp_info.compression == BI_RGB) { bmp_info.alpha_mask=image->matte != MagickFalse ? 0xff000000U : 0U; bmp_info.red_mask=0x00ff0000U; bmp_info.green_mask=0x0000ff00U; bmp_info.blue_mask=0x000000ffU; if (bmp_info.bits_per_pixel == 16) { /* RGB555. */ bmp_info.red_mask=0x00007c00U; bmp_info.green_mask=0x000003e0U; bmp_info.blue_mask=0x0000001fU; } } if ((bmp_info.bits_per_pixel == 16) || (bmp_info.bits_per_pixel == 32)) { register size_t sample; /* Get shift and quantum bits info from bitfield masks. 
*/ (void) ResetMagickMemory(&shift,0,sizeof(shift)); (void) ResetMagickMemory(&quantum_bits,0,sizeof(quantum_bits)); if (bmp_info.red_mask != 0) while (((bmp_info.red_mask << shift.red) & 0x80000000UL) == 0) shift.red++; if (bmp_info.green_mask != 0) while (((bmp_info.green_mask << shift.green) & 0x80000000UL) == 0) shift.green++; if (bmp_info.blue_mask != 0) while (((bmp_info.blue_mask << shift.blue) & 0x80000000UL) == 0) shift.blue++; if (bmp_info.alpha_mask != 0) while (((bmp_info.alpha_mask << shift.opacity) & 0x80000000UL) == 0) shift.opacity++; sample=shift.red; while (((bmp_info.red_mask << sample) & 0x80000000UL) != 0) sample++; quantum_bits.red=ClampToQuantum((MagickRealType) sample-shift.red); sample=shift.green; while (((bmp_info.green_mask << sample) & 0x80000000UL) != 0) sample++; quantum_bits.green=ClampToQuantum((MagickRealType) sample-shift.green); sample=shift.blue; while (((bmp_info.blue_mask << sample) & 0x80000000UL) != 0) sample++; quantum_bits.blue=ClampToQuantum((MagickRealType) sample-shift.blue); sample=shift.opacity; while (((bmp_info.alpha_mask << sample) & 0x80000000UL) != 0) sample++; quantum_bits.opacity=ClampToQuantum((MagickRealType) sample- shift.opacity); } switch (bmp_info.bits_per_pixel) { case 1: { /* Convert bitmap scanline. */ for (y=(ssize_t) image->rows-1; y >= 0; y--) { p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < ((ssize_t) image->columns-7); x+=8) { for (bit=0; bit < 8; bit++) { index=(IndexPacket) (((*p) & (0x80 >> bit)) != 0 ? 0x01 : 0x00); SetPixelIndex(indexes+x+bit,index); q++; } p++; } if ((image->columns % 8) != 0) { for (bit=0; bit < (image->columns % 8); bit++) { index=(IndexPacket) (((*p) & (0x80 >> bit)) != 0 ? 0x01 : 0x00); SetPixelIndex(indexes+x+bit,index); } p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } (void) SyncImage(image); break; } case 4: { /* Convert PseudoColor scanline. */ for (y=(ssize_t) image->rows-1; y >= 0; y--) { p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < ((ssize_t) image->columns-1); x+=2) { index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f); SetPixelIndex(indexes+x,index); index=ConstrainColormapIndex(image,*p & 0x0f); SetPixelIndex(indexes+x+1,index); p++; } if ((image->columns % 2) != 0) { index=ConstrainColormapIndex(image,(*p >> 4) & 0xf); SetPixelIndex(indexes+x,index); p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } (void) SyncImage(image); break; } case 8: { /* Convert PseudoColor scanline. 
*/ if ((bmp_info.compression == BI_RLE8) || (bmp_info.compression == BI_RLE4)) bytes_per_line=image->columns; for (y=(ssize_t) image->rows-1; y >= 0; y--) { p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=(ssize_t) image->columns; x != 0; --x) { index=ConstrainColormapIndex(image,*p); SetPixelIndex(indexes,index); indexes++; p++; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; offset=(MagickOffsetType) (image->rows-y-1); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } (void) SyncImage(image); break; } case 16: { size_t alpha, pixel; /* Convert bitfield encoded 16-bit PseudoColor scanline. */ if (bmp_info.compression != BI_RGB && bmp_info.compression != BI_BITFIELDS) { pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError, "UnrecognizedImageCompression"); } bytes_per_line=2*(image->columns+image->columns % 2); image->storage_class=DirectClass; for (y=(ssize_t) image->rows-1; y >= 0; y--) { p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { pixel=(size_t) (*p++); pixel|=(*p++) << 8; red=((pixel & bmp_info.red_mask) << shift.red) >> 16; if (quantum_bits.red == 5) red|=((red & 0xe000) >> 5); if (quantum_bits.red <= 8) red|=((red & 0xff00) >> 8); green=((pixel & bmp_info.green_mask) << shift.green) >> 16; if (quantum_bits.green == 5) green|=((green & 0xe000) >> 5); if (quantum_bits.green == 6) green|=((green & 0xc000) >> 6); if (quantum_bits.green <= 8) green|=((green & 0xff00) >> 8); blue=((pixel & bmp_info.blue_mask) << shift.blue) >> 16; if (quantum_bits.blue == 5) blue|=((blue & 0xe000) >> 5); if (quantum_bits.blue <= 8) blue|=((blue & 0xff00) >> 8); alpha=((pixel & bmp_info.alpha_mask) << shift.opacity) >> 16; if (quantum_bits.opacity <= 8) alpha|=((alpha & 0xff00) >> 8); SetPixelRed(q,ScaleShortToQuantum((unsigned short) red)); SetPixelGreen(q,ScaleShortToQuantum((unsigned short) green)); SetPixelBlue(q,ScaleShortToQuantum((unsigned short) blue)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelAlpha(q,ScaleShortToQuantum((unsigned short) alpha)); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; offset=(MagickOffsetType) (image->rows-y-1); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } break; } case 24: { /* Convert DirectColor scanline. 
*/ bytes_per_line=4*((image->columns*24+31)/32); for (y=(ssize_t) image->rows-1; y >= 0; y--) { p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelBlue(q,ScaleCharToQuantum(*p++)); SetPixelGreen(q,ScaleCharToQuantum(*p++)); SetPixelRed(q,ScaleCharToQuantum(*p++)); SetPixelOpacity(q,OpaqueOpacity); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; offset=(MagickOffsetType) (image->rows-y-1); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } break; } case 32: { /* Convert bitfield encoded DirectColor scanline. */ if ((bmp_info.compression != BI_RGB) && (bmp_info.compression != BI_BITFIELDS)) { pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError, "UnrecognizedImageCompression"); } bytes_per_line=4*(image->columns); for (y=(ssize_t) image->rows-1; y >= 0; y--) { size_t alpha, pixel; p=pixels+(image->rows-y-1)*bytes_per_line; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { pixel=(size_t) (*p++); pixel|=((size_t) *p++ << 8); pixel|=((size_t) *p++ << 16); pixel|=((size_t) *p++ << 24); red=((pixel & bmp_info.red_mask) << shift.red) >> 16; if (quantum_bits.red == 8) red|=(red >> 8); green=((pixel & bmp_info.green_mask) << shift.green) >> 16; if (quantum_bits.green == 8) green|=(green >> 8); blue=((pixel & bmp_info.blue_mask) << shift.blue) >> 16; if (quantum_bits.blue == 8) blue|=(blue >> 8); alpha=((pixel & bmp_info.alpha_mask) << shift.opacity) >> 16; if (quantum_bits.opacity == 8) alpha|=(alpha >> 8); SetPixelRed(q,ScaleShortToQuantum((unsigned short) red)); SetPixelGreen(q,ScaleShortToQuantum((unsigned short) green)); SetPixelBlue(q,ScaleShortToQuantum((unsigned short) blue)); SetPixelAlpha(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelAlpha(q,ScaleShortToQuantum((unsigned short) alpha)); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; offset=(MagickOffsetType) (image->rows-y-1); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) (image->rows-y),image->rows); if (status == MagickFalse) break; } } break; } default: { pixel_info=RelinquishVirtualMemory(pixel_info); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } } pixel_info=RelinquishVirtualMemory(pixel_info); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } if (bmp_info.height < 0) { Image *flipped_image; /* Correct image orientation. */ flipped_image=FlipImage(image,exception); if (flipped_image != (Image *) NULL) { DuplicateBlob(flipped_image,image); image=DestroyImage(image); image=flipped_image; } } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; *magick='\0'; if (bmp_info.ba_offset != 0) { offset=SeekBlob(image,(MagickOffsetType) bmp_info.ba_offset,SEEK_SET); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } count=ReadBlob(image,2,magick); if ((count == 2) && (IsBMP(magick,2) != MagickFalse)) { /* Acquire next image structure. 
*/ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while (IsBMP(magick,2) != MagickFalse); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
168550
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code and no explanation.
Code: cf2_interpT2CharString( CF2_Font font, CF2_Buffer buf, CF2_OutlineCallbacks callbacks, const FT_Vector* translation, FT_Bool doingSeac, CF2_Fixed curX, CF2_Fixed curY, CF2_Fixed* width ) { /* lastError is used for errors that are immediately tested */ FT_Error lastError = FT_Err_Ok; /* pointer to parsed font object */ CFF_Decoder* decoder = font->decoder; FT_Error* error = &font->error; FT_Memory memory = font->memory; CF2_Fixed scaleY = font->innerTransform.d; CF2_Fixed nominalWidthX = cf2_getNominalWidthX( decoder ); /* save this for hinting seac accents */ CF2_Fixed hintOriginY = curY; CF2_Stack opStack = NULL; FT_Byte op1; /* first opcode byte */ /* instruction limit; 20,000,000 matches Avalon */ FT_UInt32 instructionLimit = 20000000UL; CF2_ArrStackRec subrStack; FT_Bool haveWidth; CF2_Buffer charstring = NULL; CF2_Int charstringIndex = -1; /* initialize to empty */ /* TODO: placeholders for hint structures */ /* objects used for hinting */ CF2_ArrStackRec hStemHintArray; CF2_ArrStackRec vStemHintArray; CF2_HintMaskRec hintMask; CF2_GlyphPathRec glyphPath; /* initialize the remaining objects */ cf2_arrstack_init( &subrStack, memory, error, sizeof ( CF2_BufferRec ) ); cf2_arrstack_init( &hStemHintArray, memory, error, sizeof ( CF2_StemHintRec ) ); cf2_arrstack_init( &vStemHintArray, memory, error, sizeof ( CF2_StemHintRec ) ); /* initialize CF2_StemHint arrays */ cf2_hintmask_init( &hintMask, error ); /* initialize path map to manage drawing operations */ /* Note: last 4 params are used to handle `MoveToPermissive', which */ /* may need to call `hintMap.Build' */ /* TODO: MoveToPermissive is gone; are these still needed? */ cf2_glyphpath_init( &glyphPath, font, callbacks, scaleY, /* hShift, */ &hStemHintArray, &vStemHintArray, &hintMask, hintOriginY, &font->blues, translation ); /* * Initialize state for width parsing. From the CFF Spec: * * The first stack-clearing operator, which must be one of hstem, * hstemhm, vstem, vstemhm, cntrmask, hintmask, hmoveto, vmoveto, * rmoveto, or endchar, takes an additional argument - the width (as * described earlier), which may be expressed as zero or one numeric * argument. * * What we implement here uses the first validly specified width, but * does not detect errors for specifying more than one width. * * If one of the above operators occurs without explicitly specifying * a width, we assume the default width. * */ haveWidth = FALSE; *width = cf2_getDefaultWidthX( decoder ); /* * Note: at this point, all pointers to resources must be NULL * and all local objects must be initialized. * There must be no branches to exit: above this point. * */ /* allocate an operand stack */ opStack = cf2_stack_init( memory, error ); if ( !opStack ) { lastError = FT_THROW( Out_Of_Memory ); goto exit; } /* initialize subroutine stack by placing top level charstring as */ /* first element (max depth plus one for the charstring) */ /* Note: Caller owns and must finalize the first charstring. */ /* Our copy of it does not change that requirement. */ cf2_arrstack_setCount( &subrStack, CF2_MAX_SUBR + 1 ); charstring = (CF2_Buffer)cf2_arrstack_getBuffer( &subrStack ); *charstring = *buf; /* structure copy */ charstringIndex = 0; /* entry is valid now */ /* catch errors so far */ if ( *error ) goto exit; /* main interpreter loop */ while ( 1 ) { if ( cf2_buf_isEnd( charstring ) ) { /* If we've reached the end of the charstring, simulate a */ /* cf2_cmdRETURN or cf2_cmdENDCHAR. 
*/ if ( charstringIndex ) op1 = cf2_cmdRETURN; /* end of buffer for subroutine */ else op1 = cf2_cmdENDCHAR; /* end of buffer for top level charstring */ } else op1 = (FT_Byte)cf2_buf_readByte( charstring ); /* check for errors once per loop */ if ( *error ) goto exit; instructionLimit--; if ( instructionLimit == 0 ) { lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; } switch( op1 ) { case cf2_cmdRESERVED_0: case cf2_cmdRESERVED_2: case cf2_cmdRESERVED_9: case cf2_cmdRESERVED_13: case cf2_cmdRESERVED_15: case cf2_cmdRESERVED_16: case cf2_cmdRESERVED_17: /* we may get here if we have a prior error */ FT_TRACE4(( " unknown op (%d)\n", op1 )); break; case cf2_cmdHSTEMHM: case cf2_cmdHSTEM: FT_TRACE4(( op1 == cf2_cmdHSTEMHM ? " hstemhm\n" : " hstem\n" )); /* never add hints after the mask is computed */ if ( cf2_hintmask_isValid( &hintMask ) ) FT_TRACE4(( "cf2_interpT2CharString:" " invalid horizontal hint mask\n" )); cf2_doStems( font, opStack, 0 ); if ( font->decoder->width_only ) goto exit; break; case cf2_cmdVSTEMHM: case cf2_cmdVSTEM: FT_TRACE4(( op1 == cf2_cmdVSTEMHM ? " vstemhm\n" : " vstem\n" )); /* never add hints after the mask is computed */ if ( cf2_hintmask_isValid( &hintMask ) ) /* never add hints after the mask is computed */ if ( cf2_hintmask_isValid( &hintMask ) ) FT_TRACE4(( "cf2_interpT2CharString:" " invalid vertical hint mask\n" )); cf2_doStems( font, opStack, goto exit; break; case cf2_cmdVMOVETO: FT_TRACE4(( " vmoveto\n" )); if ( cf2_stack_count( opStack ) > 1 && !haveWidth ) *width = cf2_stack_getReal( opStack, 0 ) + nominalWidthX; /* width is defined or default after this */ haveWidth = TRUE; if ( font->decoder->width_only ) goto exit; curY += cf2_stack_popFixed( opStack ); cf2_glyphpath_moveTo( &glyphPath, curX, curY ); break; case cf2_cmdRLINETO: { CF2_UInt index; CF2_UInt count = cf2_stack_count( opStack ); FT_TRACE4(( " rlineto\n" )); for ( index = 0; index < count; index += 2 ) { curX += cf2_stack_getReal( opStack, index + 0 ); curY += cf2_stack_getReal( opStack, index + 1 ); cf2_glyphpath_lineTo( &glyphPath, curX, curY ); } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdHLINETO: case cf2_cmdVLINETO: { CF2_UInt index; CF2_UInt count = cf2_stack_count( opStack ); FT_Bool isX = op1 == cf2_cmdHLINETO; FT_TRACE4(( isX ? " hlineto\n" : " vlineto\n" )); for ( index = 0; index < count; index++ ) { CF2_Fixed v = cf2_stack_getReal( opStack, index ); if ( isX ) curX += v; else curY += v; isX = !isX; cf2_glyphpath_lineTo( &glyphPath, curX, curY ); } cf2_stack_clear( opStack ); } continue; case cf2_cmdRCURVELINE: case cf2_cmdRRCURVETO: { CF2_UInt count = cf2_stack_count( opStack ); CF2_UInt index = 0; FT_TRACE4(( op1 == cf2_cmdRCURVELINE ? 
" rcurveline\n" : " rrcurveto\n" )); while ( index + 6 <= count ) { CF2_Fixed x1 = cf2_stack_getReal( opStack, index + 0 ) + curX; CF2_Fixed y1 = cf2_stack_getReal( opStack, index + 1 ) + curY; CF2_Fixed x2 = cf2_stack_getReal( opStack, index + 2 ) + x1; CF2_Fixed y2 = cf2_stack_getReal( opStack, index + 3 ) + y1; CF2_Fixed x3 = cf2_stack_getReal( opStack, index + 4 ) + x2; CF2_Fixed y3 = cf2_stack_getReal( opStack, index + 5 ) + y2; cf2_glyphpath_curveTo( &glyphPath, x1, y1, x2, y2, x3, y3 ); curX = x3; curY = y3; index += 6; } if ( op1 == cf2_cmdRCURVELINE ) { curX += cf2_stack_getReal( opStack, index + 0 ); curY += cf2_stack_getReal( opStack, index + 1 ); cf2_glyphpath_lineTo( &glyphPath, curX, curY ); } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdCALLGSUBR: case cf2_cmdCALLSUBR: { CF2_UInt subrIndex; FT_TRACE4(( op1 == cf2_cmdCALLGSUBR ? " callgsubr" : " callsubr" )); if ( charstringIndex > CF2_MAX_SUBR ) { /* max subr plus one for charstring */ lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; /* overflow of stack */ } /* push our current CFF charstring region on subrStack */ charstring = (CF2_Buffer) cf2_arrstack_getPointer( &subrStack, charstringIndex + 1 ); /* set up the new CFF region and pointer */ subrIndex = cf2_stack_popInt( opStack ); switch ( op1 ) { case cf2_cmdCALLGSUBR: FT_TRACE4(( "(%d)\n", subrIndex + decoder->globals_bias )); if ( cf2_initGlobalRegionBuffer( decoder, subrIndex, charstring ) ) { lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; /* subroutine lookup or stream error */ } break; default: /* cf2_cmdCALLSUBR */ FT_TRACE4(( "(%d)\n", subrIndex + decoder->locals_bias )); if ( cf2_initLocalRegionBuffer( decoder, subrIndex, charstring ) ) { lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; /* subroutine lookup or stream error */ } } charstringIndex += 1; /* entry is valid now */ } continue; /* do not clear the stack */ case cf2_cmdRETURN: FT_TRACE4(( " return\n" )); if ( charstringIndex < 1 ) { /* Note: cannot return from top charstring */ lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; /* underflow of stack */ } /* restore position in previous charstring */ charstring = (CF2_Buffer) cf2_arrstack_getPointer( &subrStack, --charstringIndex ); continue; /* do not clear the stack */ case cf2_cmdESC: { FT_Byte op2 = (FT_Byte)cf2_buf_readByte( charstring ); switch ( op2 ) { case cf2_escDOTSECTION: /* something about `flip type of locking' -- ignore it */ FT_TRACE4(( " dotsection\n" )); break; /* TODO: should these operators be supported? 
*/ case cf2_escAND: /* in spec */ FT_TRACE4(( " and\n" )); CF2_FIXME; break; case cf2_escOR: /* in spec */ FT_TRACE4(( " or\n" )); CF2_FIXME; break; case cf2_escNOT: /* in spec */ FT_TRACE4(( " not\n" )); CF2_FIXME; break; case cf2_escABS: /* in spec */ FT_TRACE4(( " abs\n" )); CF2_FIXME; break; case cf2_escADD: /* in spec */ FT_TRACE4(( " add\n" )); CF2_FIXME; break; case cf2_escSUB: /* in spec */ FT_TRACE4(( " sub\n" )); CF2_FIXME; break; case cf2_escDIV: /* in spec */ FT_TRACE4(( " div\n" )); CF2_FIXME; break; case cf2_escNEG: /* in spec */ FT_TRACE4(( " neg\n" )); CF2_FIXME; break; case cf2_escEQ: /* in spec */ FT_TRACE4(( " eq\n" )); CF2_FIXME; break; case cf2_escDROP: /* in spec */ FT_TRACE4(( " drop\n" )); CF2_FIXME; break; case cf2_escPUT: /* in spec */ FT_TRACE4(( " put\n" )); CF2_FIXME; break; case cf2_escGET: /* in spec */ FT_TRACE4(( " get\n" )); CF2_FIXME; break; case cf2_escIFELSE: /* in spec */ FT_TRACE4(( " ifelse\n" )); CF2_FIXME; break; case cf2_escRANDOM: /* in spec */ FT_TRACE4(( " random\n" )); CF2_FIXME; break; case cf2_escMUL: /* in spec */ FT_TRACE4(( " mul\n" )); CF2_FIXME; break; case cf2_escSQRT: /* in spec */ FT_TRACE4(( " sqrt\n" )); CF2_FIXME; break; case cf2_escDUP: /* in spec */ FT_TRACE4(( " dup\n" )); CF2_FIXME; break; case cf2_escEXCH: /* in spec */ FT_TRACE4(( " exch\n" )); CF2_FIXME; break; case cf2_escINDEX: /* in spec */ FT_TRACE4(( " index\n" )); CF2_FIXME; break; case cf2_escROLL: /* in spec */ FT_TRACE4(( " roll\n" )); CF2_FIXME; break; case cf2_escHFLEX: { static const FT_Bool readFromStack[12] = { TRUE /* dx1 */, FALSE /* dy1 */, TRUE /* dx2 */, TRUE /* dy2 */, TRUE /* dx3 */, FALSE /* dy3 */, TRUE /* dx4 */, FALSE /* dy4 */, TRUE /* dx5 */, FALSE /* dy5 */, TRUE /* dx6 */, FALSE /* dy6 */ }; FT_TRACE4(( " hflex\n" )); cf2_doFlex( opStack, &curX, &curY, &glyphPath, readFromStack, FALSE /* doConditionalLastRead */ ); } continue; case cf2_escFLEX: { static const FT_Bool readFromStack[12] = { TRUE /* dx1 */, TRUE /* dy1 */, TRUE /* dx2 */, TRUE /* dy2 */, TRUE /* dx3 */, TRUE /* dy3 */, TRUE /* dx4 */, TRUE /* dy4 */, TRUE /* dx5 */, TRUE /* dy5 */, TRUE /* dx6 */, TRUE /* dy6 */ }; FT_TRACE4(( " flex\n" )); cf2_doFlex( opStack, &curX, &curY, &glyphPath, readFromStack, FALSE /* doConditionalLastRead */ ); } break; /* TODO: why is this not a continue? 
*/ case cf2_escHFLEX1: { static const FT_Bool readFromStack[12] = { TRUE /* dx1 */, TRUE /* dy1 */, TRUE /* dx2 */, TRUE /* dy2 */, TRUE /* dx3 */, FALSE /* dy3 */, TRUE /* dx4 */, FALSE /* dy4 */, TRUE /* dx5 */, TRUE /* dy5 */, TRUE /* dx6 */, FALSE /* dy6 */ }; FT_TRACE4(( " hflex1\n" )); cf2_doFlex( opStack, &curX, &curY, &glyphPath, readFromStack, FALSE /* doConditionalLastRead */ ); } continue; case cf2_escFLEX1: { static const FT_Bool readFromStack[12] = { TRUE /* dx1 */, TRUE /* dy1 */, TRUE /* dx2 */, TRUE /* dy2 */, TRUE /* dx3 */, TRUE /* dy3 */, TRUE /* dx4 */, TRUE /* dy4 */, TRUE /* dx5 */, TRUE /* dy5 */, FALSE /* dx6 */, FALSE /* dy6 */ }; FT_TRACE4(( " flex1\n" )); cf2_doFlex( opStack, &curX, &curY, &glyphPath, readFromStack, TRUE /* doConditionalLastRead */ ); } continue; case cf2_escRESERVED_1: case cf2_escRESERVED_2: case cf2_escRESERVED_6: case cf2_escRESERVED_7: case cf2_escRESERVED_8: case cf2_escRESERVED_13: case cf2_escRESERVED_16: case cf2_escRESERVED_17: case cf2_escRESERVED_19: case cf2_escRESERVED_25: case cf2_escRESERVED_31: case cf2_escRESERVED_32: case cf2_escRESERVED_33: default: FT_TRACE4(( " unknown op (12, %d)\n", op2 )); }; /* end of switch statement checking `op2' */ } /* case cf2_cmdESC */ break; case cf2_cmdENDCHAR: FT_TRACE4(( " endchar\n" )); if ( cf2_stack_count( opStack ) == 1 || cf2_stack_count( opStack ) == 5 ) { if ( !haveWidth ) *width = cf2_stack_getReal( opStack, 0 ) + nominalWidthX; } /* width is defined or default after this */ haveWidth = TRUE; if ( font->decoder->width_only ) goto exit; /* close path if still open */ cf2_glyphpath_closeOpenPath( &glyphPath ); if ( cf2_stack_count( opStack ) > 1 ) { /* must be either 4 or 5 -- */ /* this is a (deprecated) implied `seac' operator */ CF2_UInt achar; CF2_UInt bchar; CF2_BufferRec component; CF2_Fixed dummyWidth; /* ignore component width */ FT_Error error2; if ( doingSeac ) { lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; /* nested seac */ } achar = cf2_stack_popInt( opStack ); bchar = cf2_stack_popInt( opStack ); curY = cf2_stack_popFixed( opStack ); curX = cf2_stack_popFixed( opStack ); error2 = cf2_getSeacComponent( decoder, achar, &component ); if ( error2 ) { lastError = error2; /* pass FreeType error through */ goto exit; } cf2_interpT2CharString( font, &component, callbacks, translation, TRUE, curX, curY, &dummyWidth ); cf2_freeSeacComponent( decoder, &component ); error2 = cf2_getSeacComponent( decoder, bchar, &component ); if ( error2 ) { lastError = error2; /* pass FreeType error through */ goto exit; } cf2_interpT2CharString( font, &component, callbacks, translation, TRUE, 0, 0, &dummyWidth ); cf2_freeSeacComponent( decoder, &component ); } goto exit; case cf2_cmdCNTRMASK: case cf2_cmdHINTMASK: /* the final \n in the tracing message gets added in */ /* `cf2_hintmask_read' (which also traces the mask bytes) */ FT_TRACE4(( op1 == cf2_cmdCNTRMASK ? " cntrmask" : " hintmask" )); /* if there are arguments on the stack, there this is an */ /* implied cf2_cmdVSTEMHM */ if ( cf2_stack_count( opStack ) != 0 ) /* `cf2_hintmask_read' (which also traces the mask bytes) */ FT_TRACE4(( op1 == cf2_cmdCNTRMASK ? 
" cntrmask" : " hintmask" )); /* if there are arguments on the stack, there this is an */ /* implied cf2_cmdVSTEMHM */ if ( cf2_stack_count( opStack ) != 0 ) { /* never add hints after the mask is computed */ if ( cf2_hintmask_isValid( &hintMask ) ) FT_TRACE4(( "cf2_interpT2CharString: invalid hint mask\n" )); } cf2_doStems( font, opStack, &vStemHintArray, if ( op1 == cf2_cmdHINTMASK ) { /* consume the hint mask bytes which follow the operator */ cf2_hintmask_read( &hintMask, charstring, cf2_arrstack_size( &hStemHintArray ) + cf2_arrstack_size( &vStemHintArray ) ); } else { /* * Consume the counter mask bytes which follow the operator: * Build a temporary hint map, just to place and lock those * stems participating in the counter mask. These are most * likely the dominant hstems, and are grouped together in a * few counter groups, not necessarily in correspondence * with the hint groups. This reduces the chances of * conflicts between hstems that are initially placed in * separate hint groups and then brought together. The * positions are copied back to `hStemHintArray', so we can * discard `counterMask' and `counterHintMap'. * */ CF2_HintMapRec counterHintMap; CF2_HintMaskRec counterMask; cf2_hintmap_init( &counterHintMap, font, &glyphPath.initialHintMap, &glyphPath.hintMoves, scaleY ); cf2_hintmask_init( &counterMask, error ); cf2_hintmask_read( &counterMask, charstring, cf2_arrstack_size( &hStemHintArray ) + cf2_arrstack_size( &vStemHintArray ) ); cf2_hintmap_build( &counterHintMap, &hStemHintArray, &vStemHintArray, &counterMask, 0, FALSE ); } break; case cf2_cmdRMOVETO: FT_TRACE4(( " rmoveto\n" )); if ( cf2_stack_count( opStack ) > 2 && !haveWidth ) *width = cf2_stack_getReal( opStack, 0 ) + nominalWidthX; /* width is defined or default after this */ haveWidth = TRUE; if ( font->decoder->width_only ) goto exit; curY += cf2_stack_popFixed( opStack ); curX += cf2_stack_popFixed( opStack ); cf2_glyphpath_moveTo( &glyphPath, curX, curY ); break; case cf2_cmdHMOVETO: FT_TRACE4(( " hmoveto\n" )); if ( cf2_stack_count( opStack ) > 1 && !haveWidth ) *width = cf2_stack_getReal( opStack, 0 ) + nominalWidthX; /* width is defined or default after this */ haveWidth = TRUE; if ( font->decoder->width_only ) goto exit; curX += cf2_stack_popFixed( opStack ); cf2_glyphpath_moveTo( &glyphPath, curX, curY ); break; case cf2_cmdRLINECURVE: { CF2_UInt count = cf2_stack_count( opStack ); CF2_UInt index = 0; FT_TRACE4(( " rlinecurve\n" )); while ( index + 6 < count ) { curX += cf2_stack_getReal( opStack, index + 0 ); curY += cf2_stack_getReal( opStack, index + 1 ); cf2_glyphpath_lineTo( &glyphPath, curX, curY ); index += 2; } while ( index < count ) { CF2_Fixed x1 = cf2_stack_getReal( opStack, index + 0 ) + curX; CF2_Fixed y1 = cf2_stack_getReal( opStack, index + 1 ) + curY; CF2_Fixed x2 = cf2_stack_getReal( opStack, index + 2 ) + x1; CF2_Fixed y2 = cf2_stack_getReal( opStack, index + 3 ) + y1; CF2_Fixed x3 = cf2_stack_getReal( opStack, index + 4 ) + x2; CF2_Fixed y3 = cf2_stack_getReal( opStack, index + 5 ) + y2; cf2_glyphpath_curveTo( &glyphPath, x1, y1, x2, y2, x3, y3 ); curX = x3; curY = y3; index += 6; } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdVVCURVETO: { CF2_UInt count = cf2_stack_count( opStack ); CF2_UInt index = 0; FT_TRACE4(( " vvcurveto\n" )); while ( index < count ) { CF2_Fixed x1, y1, x2, y2, x3, y3; if ( ( count - index ) & 1 ) { x1 = cf2_stack_getReal( opStack, index ) + curX; ++index; } else x1 = curX; y1 = cf2_stack_getReal( opStack, index + 0 ) 
+ curY; x2 = cf2_stack_getReal( opStack, index + 1 ) + x1; y2 = cf2_stack_getReal( opStack, index + 2 ) + y1; x3 = x2; y3 = cf2_stack_getReal( opStack, index + 3 ) + y2; cf2_glyphpath_curveTo( &glyphPath, x1, y1, x2, y2, x3, y3 ); curX = x3; curY = y3; index += 4; } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdHHCURVETO: { CF2_UInt count = cf2_stack_count( opStack ); CF2_UInt index = 0; FT_TRACE4(( " hhcurveto\n" )); while ( index < count ) { CF2_Fixed x1, y1, x2, y2, x3, y3; if ( ( count - index ) & 1 ) { y1 = cf2_stack_getReal( opStack, index ) + curY; ++index; } else y1 = curY; x1 = cf2_stack_getReal( opStack, index + 0 ) + curX; x2 = cf2_stack_getReal( opStack, index + 1 ) + x1; y2 = cf2_stack_getReal( opStack, index + 2 ) + y1; x3 = cf2_stack_getReal( opStack, index + 3 ) + x2; y3 = y2; cf2_glyphpath_curveTo( &glyphPath, x1, y1, x2, y2, x3, y3 ); curX = x3; curY = y3; index += 4; } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdVHCURVETO: case cf2_cmdHVCURVETO: { CF2_UInt count = cf2_stack_count( opStack ); CF2_UInt index = 0; FT_Bool alternate = op1 == cf2_cmdHVCURVETO; FT_TRACE4(( alternate ? " hvcurveto\n" : " vhcurveto\n" )); while ( index < count ) { CF2_Fixed x1, x2, x3, y1, y2, y3; if ( alternate ) { x1 = cf2_stack_getReal( opStack, index + 0 ) + curX; y1 = curY; x2 = cf2_stack_getReal( opStack, index + 1 ) + x1; y2 = cf2_stack_getReal( opStack, index + 2 ) + y1; y3 = cf2_stack_getReal( opStack, index + 3 ) + y2; if ( count - index == 5 ) { x3 = cf2_stack_getReal( opStack, index + 4 ) + x2; ++index; } else x3 = x2; alternate = FALSE; } else { x1 = curX; y1 = cf2_stack_getReal( opStack, index + 0 ) + curY; x2 = cf2_stack_getReal( opStack, index + 1 ) + x1; y2 = cf2_stack_getReal( opStack, index + 2 ) + y1; x3 = cf2_stack_getReal( opStack, index + 3 ) + x2; if ( count - index == 5 ) { y3 = cf2_stack_getReal( opStack, index + 4 ) + y2; ++index; } else y3 = y2; alternate = TRUE; } cf2_glyphpath_curveTo( &glyphPath, x1, y1, x2, y2, x3, y3 ); curX = x3; curY = y3; index += 4; } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdEXTENDEDNMBR: { CF2_Int v; v = (FT_Short)( ( cf2_buf_readByte( charstring ) << 8 ) | cf2_buf_readByte( charstring ) ); FT_TRACE4(( " %d", v )); cf2_stack_pushInt( opStack, v ); } continue; default: /* numbers */ { if ( /* op1 >= 32 && */ op1 <= 246 ) { CF2_Int v; v = op1 - 139; FT_TRACE4(( " %d", v )); /* -107 .. 107 */ cf2_stack_pushInt( opStack, v ); } else if ( /* op1 >= 247 && */ op1 <= 250 ) { CF2_Int v; v = op1; v -= 247; v *= 256; v += cf2_buf_readByte( charstring ); v += 108; FT_TRACE4(( " %d", v )); /* 108 .. 1131 */ cf2_stack_pushInt( opStack, v ); } else if ( /* op1 >= 251 && */ op1 <= 254 ) { CF2_Int v; v = op1; v -= 251; v *= 256; v += cf2_buf_readByte( charstring ); v = -v - 108; FT_TRACE4(( " %d", v )); /* -1131 .. 
-108 */ cf2_stack_pushInt( opStack, v ); } else /* op1 == 255 */ { CF2_Fixed v; v = (CF2_Fixed) ( ( (FT_UInt32)cf2_buf_readByte( charstring ) << 24 ) | ( (FT_UInt32)cf2_buf_readByte( charstring ) << 16 ) | ( (FT_UInt32)cf2_buf_readByte( charstring ) << 8 ) | (FT_UInt32)cf2_buf_readByte( charstring ) ); FT_TRACE4(( " %.2f", v / 65536.0 )); cf2_stack_pushFixed( opStack, v ); } } continue; /* don't clear stack */ } /* end of switch statement checking `op1' */ cf2_stack_clear( opStack ); } /* end of main interpreter loop */ /* we get here if the charstring ends without cf2_cmdENDCHAR */ FT_TRACE4(( "cf2_interpT2CharString:" " charstring ends without ENDCHAR\n" )); exit: /* check whether last error seen is also the first one */ cf2_setError( error, lastError ); /* free resources from objects we've used */ cf2_glyphpath_finalize( &glyphPath ); cf2_arrstack_finalize( &vStemHintArray ); cf2_arrstack_finalize( &hStemHintArray ); cf2_arrstack_finalize( &subrStack ); cf2_stack_free( opStack ); FT_TRACE4(( "\n" )); return; } Commit Message: CWE ID: CWE-119
cf2_interpT2CharString( CF2_Font font, CF2_Buffer buf, CF2_OutlineCallbacks callbacks, const FT_Vector* translation, FT_Bool doingSeac, CF2_Fixed curX, CF2_Fixed curY, CF2_Fixed* width ) { /* lastError is used for errors that are immediately tested */ FT_Error lastError = FT_Err_Ok; /* pointer to parsed font object */ CFF_Decoder* decoder = font->decoder; FT_Error* error = &font->error; FT_Memory memory = font->memory; CF2_Fixed scaleY = font->innerTransform.d; CF2_Fixed nominalWidthX = cf2_getNominalWidthX( decoder ); /* save this for hinting seac accents */ CF2_Fixed hintOriginY = curY; CF2_Stack opStack = NULL; FT_Byte op1; /* first opcode byte */ /* instruction limit; 20,000,000 matches Avalon */ FT_UInt32 instructionLimit = 20000000UL; CF2_ArrStackRec subrStack; FT_Bool haveWidth; CF2_Buffer charstring = NULL; CF2_Int charstringIndex = -1; /* initialize to empty */ /* TODO: placeholders for hint structures */ /* objects used for hinting */ CF2_ArrStackRec hStemHintArray; CF2_ArrStackRec vStemHintArray; CF2_HintMaskRec hintMask; CF2_GlyphPathRec glyphPath; /* initialize the remaining objects */ cf2_arrstack_init( &subrStack, memory, error, sizeof ( CF2_BufferRec ) ); cf2_arrstack_init( &hStemHintArray, memory, error, sizeof ( CF2_StemHintRec ) ); cf2_arrstack_init( &vStemHintArray, memory, error, sizeof ( CF2_StemHintRec ) ); /* initialize CF2_StemHint arrays */ cf2_hintmask_init( &hintMask, error ); /* initialize path map to manage drawing operations */ /* Note: last 4 params are used to handle `MoveToPermissive', which */ /* may need to call `hintMap.Build' */ /* TODO: MoveToPermissive is gone; are these still needed? */ cf2_glyphpath_init( &glyphPath, font, callbacks, scaleY, /* hShift, */ &hStemHintArray, &vStemHintArray, &hintMask, hintOriginY, &font->blues, translation ); /* * Initialize state for width parsing. From the CFF Spec: * * The first stack-clearing operator, which must be one of hstem, * hstemhm, vstem, vstemhm, cntrmask, hintmask, hmoveto, vmoveto, * rmoveto, or endchar, takes an additional argument - the width (as * described earlier), which may be expressed as zero or one numeric * argument. * * What we implement here uses the first validly specified width, but * does not detect errors for specifying more than one width. * * If one of the above operators occurs without explicitly specifying * a width, we assume the default width. * */ haveWidth = FALSE; *width = cf2_getDefaultWidthX( decoder ); /* * Note: at this point, all pointers to resources must be NULL * and all local objects must be initialized. * There must be no branches to exit: above this point. * */ /* allocate an operand stack */ opStack = cf2_stack_init( memory, error ); if ( !opStack ) { lastError = FT_THROW( Out_Of_Memory ); goto exit; } /* initialize subroutine stack by placing top level charstring as */ /* first element (max depth plus one for the charstring) */ /* Note: Caller owns and must finalize the first charstring. */ /* Our copy of it does not change that requirement. */ cf2_arrstack_setCount( &subrStack, CF2_MAX_SUBR + 1 ); charstring = (CF2_Buffer)cf2_arrstack_getBuffer( &subrStack ); *charstring = *buf; /* structure copy */ charstringIndex = 0; /* entry is valid now */ /* catch errors so far */ if ( *error ) goto exit; /* main interpreter loop */ while ( 1 ) { if ( cf2_buf_isEnd( charstring ) ) { /* If we've reached the end of the charstring, simulate a */ /* cf2_cmdRETURN or cf2_cmdENDCHAR. 
*/ if ( charstringIndex ) op1 = cf2_cmdRETURN; /* end of buffer for subroutine */ else op1 = cf2_cmdENDCHAR; /* end of buffer for top level charstring */ } else op1 = (FT_Byte)cf2_buf_readByte( charstring ); /* check for errors once per loop */ if ( *error ) goto exit; instructionLimit--; if ( instructionLimit == 0 ) { lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; } switch( op1 ) { case cf2_cmdRESERVED_0: case cf2_cmdRESERVED_2: case cf2_cmdRESERVED_9: case cf2_cmdRESERVED_13: case cf2_cmdRESERVED_15: case cf2_cmdRESERVED_16: case cf2_cmdRESERVED_17: /* we may get here if we have a prior error */ FT_TRACE4(( " unknown op (%d)\n", op1 )); break; case cf2_cmdHSTEMHM: case cf2_cmdHSTEM: FT_TRACE4(( op1 == cf2_cmdHSTEMHM ? " hstemhm\n" : " hstem\n" )); /* never add hints after the mask is computed */ if ( cf2_hintmask_isValid( &hintMask ) ) { FT_TRACE4(( "cf2_interpT2CharString:" " invalid horizontal hint mask\n" )); break; } cf2_doStems( font, opStack, 0 ); if ( font->decoder->width_only ) goto exit; break; case cf2_cmdVSTEMHM: case cf2_cmdVSTEM: FT_TRACE4(( op1 == cf2_cmdVSTEMHM ? " vstemhm\n" : " vstem\n" )); /* never add hints after the mask is computed */ if ( cf2_hintmask_isValid( &hintMask ) ) /* never add hints after the mask is computed */ if ( cf2_hintmask_isValid( &hintMask ) ) { FT_TRACE4(( "cf2_interpT2CharString:" " invalid vertical hint mask\n" )); break; } cf2_doStems( font, opStack, goto exit; break; case cf2_cmdVMOVETO: FT_TRACE4(( " vmoveto\n" )); if ( cf2_stack_count( opStack ) > 1 && !haveWidth ) *width = cf2_stack_getReal( opStack, 0 ) + nominalWidthX; /* width is defined or default after this */ haveWidth = TRUE; if ( font->decoder->width_only ) goto exit; curY += cf2_stack_popFixed( opStack ); cf2_glyphpath_moveTo( &glyphPath, curX, curY ); break; case cf2_cmdRLINETO: { CF2_UInt index; CF2_UInt count = cf2_stack_count( opStack ); FT_TRACE4(( " rlineto\n" )); for ( index = 0; index < count; index += 2 ) { curX += cf2_stack_getReal( opStack, index + 0 ); curY += cf2_stack_getReal( opStack, index + 1 ); cf2_glyphpath_lineTo( &glyphPath, curX, curY ); } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdHLINETO: case cf2_cmdVLINETO: { CF2_UInt index; CF2_UInt count = cf2_stack_count( opStack ); FT_Bool isX = op1 == cf2_cmdHLINETO; FT_TRACE4(( isX ? " hlineto\n" : " vlineto\n" )); for ( index = 0; index < count; index++ ) { CF2_Fixed v = cf2_stack_getReal( opStack, index ); if ( isX ) curX += v; else curY += v; isX = !isX; cf2_glyphpath_lineTo( &glyphPath, curX, curY ); } cf2_stack_clear( opStack ); } continue; case cf2_cmdRCURVELINE: case cf2_cmdRRCURVETO: { CF2_UInt count = cf2_stack_count( opStack ); CF2_UInt index = 0; FT_TRACE4(( op1 == cf2_cmdRCURVELINE ? 
" rcurveline\n" : " rrcurveto\n" )); while ( index + 6 <= count ) { CF2_Fixed x1 = cf2_stack_getReal( opStack, index + 0 ) + curX; CF2_Fixed y1 = cf2_stack_getReal( opStack, index + 1 ) + curY; CF2_Fixed x2 = cf2_stack_getReal( opStack, index + 2 ) + x1; CF2_Fixed y2 = cf2_stack_getReal( opStack, index + 3 ) + y1; CF2_Fixed x3 = cf2_stack_getReal( opStack, index + 4 ) + x2; CF2_Fixed y3 = cf2_stack_getReal( opStack, index + 5 ) + y2; cf2_glyphpath_curveTo( &glyphPath, x1, y1, x2, y2, x3, y3 ); curX = x3; curY = y3; index += 6; } if ( op1 == cf2_cmdRCURVELINE ) { curX += cf2_stack_getReal( opStack, index + 0 ); curY += cf2_stack_getReal( opStack, index + 1 ); cf2_glyphpath_lineTo( &glyphPath, curX, curY ); } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdCALLGSUBR: case cf2_cmdCALLSUBR: { CF2_UInt subrIndex; FT_TRACE4(( op1 == cf2_cmdCALLGSUBR ? " callgsubr" : " callsubr" )); if ( charstringIndex > CF2_MAX_SUBR ) { /* max subr plus one for charstring */ lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; /* overflow of stack */ } /* push our current CFF charstring region on subrStack */ charstring = (CF2_Buffer) cf2_arrstack_getPointer( &subrStack, charstringIndex + 1 ); /* set up the new CFF region and pointer */ subrIndex = cf2_stack_popInt( opStack ); switch ( op1 ) { case cf2_cmdCALLGSUBR: FT_TRACE4(( "(%d)\n", subrIndex + decoder->globals_bias )); if ( cf2_initGlobalRegionBuffer( decoder, subrIndex, charstring ) ) { lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; /* subroutine lookup or stream error */ } break; default: /* cf2_cmdCALLSUBR */ FT_TRACE4(( "(%d)\n", subrIndex + decoder->locals_bias )); if ( cf2_initLocalRegionBuffer( decoder, subrIndex, charstring ) ) { lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; /* subroutine lookup or stream error */ } } charstringIndex += 1; /* entry is valid now */ } continue; /* do not clear the stack */ case cf2_cmdRETURN: FT_TRACE4(( " return\n" )); if ( charstringIndex < 1 ) { /* Note: cannot return from top charstring */ lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; /* underflow of stack */ } /* restore position in previous charstring */ charstring = (CF2_Buffer) cf2_arrstack_getPointer( &subrStack, --charstringIndex ); continue; /* do not clear the stack */ case cf2_cmdESC: { FT_Byte op2 = (FT_Byte)cf2_buf_readByte( charstring ); switch ( op2 ) { case cf2_escDOTSECTION: /* something about `flip type of locking' -- ignore it */ FT_TRACE4(( " dotsection\n" )); break; /* TODO: should these operators be supported? 
*/ case cf2_escAND: /* in spec */ FT_TRACE4(( " and\n" )); CF2_FIXME; break; case cf2_escOR: /* in spec */ FT_TRACE4(( " or\n" )); CF2_FIXME; break; case cf2_escNOT: /* in spec */ FT_TRACE4(( " not\n" )); CF2_FIXME; break; case cf2_escABS: /* in spec */ FT_TRACE4(( " abs\n" )); CF2_FIXME; break; case cf2_escADD: /* in spec */ FT_TRACE4(( " add\n" )); CF2_FIXME; break; case cf2_escSUB: /* in spec */ FT_TRACE4(( " sub\n" )); CF2_FIXME; break; case cf2_escDIV: /* in spec */ FT_TRACE4(( " div\n" )); CF2_FIXME; break; case cf2_escNEG: /* in spec */ FT_TRACE4(( " neg\n" )); CF2_FIXME; break; case cf2_escEQ: /* in spec */ FT_TRACE4(( " eq\n" )); CF2_FIXME; break; case cf2_escDROP: /* in spec */ FT_TRACE4(( " drop\n" )); CF2_FIXME; break; case cf2_escPUT: /* in spec */ FT_TRACE4(( " put\n" )); CF2_FIXME; break; case cf2_escGET: /* in spec */ FT_TRACE4(( " get\n" )); CF2_FIXME; break; case cf2_escIFELSE: /* in spec */ FT_TRACE4(( " ifelse\n" )); CF2_FIXME; break; case cf2_escRANDOM: /* in spec */ FT_TRACE4(( " random\n" )); CF2_FIXME; break; case cf2_escMUL: /* in spec */ FT_TRACE4(( " mul\n" )); CF2_FIXME; break; case cf2_escSQRT: /* in spec */ FT_TRACE4(( " sqrt\n" )); CF2_FIXME; break; case cf2_escDUP: /* in spec */ FT_TRACE4(( " dup\n" )); CF2_FIXME; break; case cf2_escEXCH: /* in spec */ FT_TRACE4(( " exch\n" )); CF2_FIXME; break; case cf2_escINDEX: /* in spec */ FT_TRACE4(( " index\n" )); CF2_FIXME; break; case cf2_escROLL: /* in spec */ FT_TRACE4(( " roll\n" )); CF2_FIXME; break; case cf2_escHFLEX: { static const FT_Bool readFromStack[12] = { TRUE /* dx1 */, FALSE /* dy1 */, TRUE /* dx2 */, TRUE /* dy2 */, TRUE /* dx3 */, FALSE /* dy3 */, TRUE /* dx4 */, FALSE /* dy4 */, TRUE /* dx5 */, FALSE /* dy5 */, TRUE /* dx6 */, FALSE /* dy6 */ }; FT_TRACE4(( " hflex\n" )); cf2_doFlex( opStack, &curX, &curY, &glyphPath, readFromStack, FALSE /* doConditionalLastRead */ ); } continue; case cf2_escFLEX: { static const FT_Bool readFromStack[12] = { TRUE /* dx1 */, TRUE /* dy1 */, TRUE /* dx2 */, TRUE /* dy2 */, TRUE /* dx3 */, TRUE /* dy3 */, TRUE /* dx4 */, TRUE /* dy4 */, TRUE /* dx5 */, TRUE /* dy5 */, TRUE /* dx6 */, TRUE /* dy6 */ }; FT_TRACE4(( " flex\n" )); cf2_doFlex( opStack, &curX, &curY, &glyphPath, readFromStack, FALSE /* doConditionalLastRead */ ); } break; /* TODO: why is this not a continue? 
*/ case cf2_escHFLEX1: { static const FT_Bool readFromStack[12] = { TRUE /* dx1 */, TRUE /* dy1 */, TRUE /* dx2 */, TRUE /* dy2 */, TRUE /* dx3 */, FALSE /* dy3 */, TRUE /* dx4 */, FALSE /* dy4 */, TRUE /* dx5 */, TRUE /* dy5 */, TRUE /* dx6 */, FALSE /* dy6 */ }; FT_TRACE4(( " hflex1\n" )); cf2_doFlex( opStack, &curX, &curY, &glyphPath, readFromStack, FALSE /* doConditionalLastRead */ ); } continue; case cf2_escFLEX1: { static const FT_Bool readFromStack[12] = { TRUE /* dx1 */, TRUE /* dy1 */, TRUE /* dx2 */, TRUE /* dy2 */, TRUE /* dx3 */, TRUE /* dy3 */, TRUE /* dx4 */, TRUE /* dy4 */, TRUE /* dx5 */, TRUE /* dy5 */, FALSE /* dx6 */, FALSE /* dy6 */ }; FT_TRACE4(( " flex1\n" )); cf2_doFlex( opStack, &curX, &curY, &glyphPath, readFromStack, TRUE /* doConditionalLastRead */ ); } continue; case cf2_escRESERVED_1: case cf2_escRESERVED_2: case cf2_escRESERVED_6: case cf2_escRESERVED_7: case cf2_escRESERVED_8: case cf2_escRESERVED_13: case cf2_escRESERVED_16: case cf2_escRESERVED_17: case cf2_escRESERVED_19: case cf2_escRESERVED_25: case cf2_escRESERVED_31: case cf2_escRESERVED_32: case cf2_escRESERVED_33: default: FT_TRACE4(( " unknown op (12, %d)\n", op2 )); }; /* end of switch statement checking `op2' */ } /* case cf2_cmdESC */ break; case cf2_cmdENDCHAR: FT_TRACE4(( " endchar\n" )); if ( cf2_stack_count( opStack ) == 1 || cf2_stack_count( opStack ) == 5 ) { if ( !haveWidth ) *width = cf2_stack_getReal( opStack, 0 ) + nominalWidthX; } /* width is defined or default after this */ haveWidth = TRUE; if ( font->decoder->width_only ) goto exit; /* close path if still open */ cf2_glyphpath_closeOpenPath( &glyphPath ); if ( cf2_stack_count( opStack ) > 1 ) { /* must be either 4 or 5 -- */ /* this is a (deprecated) implied `seac' operator */ CF2_UInt achar; CF2_UInt bchar; CF2_BufferRec component; CF2_Fixed dummyWidth; /* ignore component width */ FT_Error error2; if ( doingSeac ) { lastError = FT_THROW( Invalid_Glyph_Format ); goto exit; /* nested seac */ } achar = cf2_stack_popInt( opStack ); bchar = cf2_stack_popInt( opStack ); curY = cf2_stack_popFixed( opStack ); curX = cf2_stack_popFixed( opStack ); error2 = cf2_getSeacComponent( decoder, achar, &component ); if ( error2 ) { lastError = error2; /* pass FreeType error through */ goto exit; } cf2_interpT2CharString( font, &component, callbacks, translation, TRUE, curX, curY, &dummyWidth ); cf2_freeSeacComponent( decoder, &component ); error2 = cf2_getSeacComponent( decoder, bchar, &component ); if ( error2 ) { lastError = error2; /* pass FreeType error through */ goto exit; } cf2_interpT2CharString( font, &component, callbacks, translation, TRUE, 0, 0, &dummyWidth ); cf2_freeSeacComponent( decoder, &component ); } goto exit; case cf2_cmdCNTRMASK: case cf2_cmdHINTMASK: /* the final \n in the tracing message gets added in */ /* `cf2_hintmask_read' (which also traces the mask bytes) */ FT_TRACE4(( op1 == cf2_cmdCNTRMASK ? " cntrmask" : " hintmask" )); /* if there are arguments on the stack, there this is an */ /* implied cf2_cmdVSTEMHM */ if ( cf2_stack_count( opStack ) != 0 ) /* `cf2_hintmask_read' (which also traces the mask bytes) */ FT_TRACE4(( op1 == cf2_cmdCNTRMASK ? 
" cntrmask" : " hintmask" )); /* never add hints after the mask is computed */ if ( cf2_stack_count( opStack ) > 1 && cf2_hintmask_isValid( &hintMask ) ) { FT_TRACE4(( "cf2_interpT2CharString: invalid hint mask\n" )); break; } /* if there are arguments on the stack, there this is an */ /* implied cf2_cmdVSTEMHM */ cf2_doStems( font, opStack, &vStemHintArray, if ( op1 == cf2_cmdHINTMASK ) { /* consume the hint mask bytes which follow the operator */ cf2_hintmask_read( &hintMask, charstring, cf2_arrstack_size( &hStemHintArray ) + cf2_arrstack_size( &vStemHintArray ) ); } else { /* * Consume the counter mask bytes which follow the operator: * Build a temporary hint map, just to place and lock those * stems participating in the counter mask. These are most * likely the dominant hstems, and are grouped together in a * few counter groups, not necessarily in correspondence * with the hint groups. This reduces the chances of * conflicts between hstems that are initially placed in * separate hint groups and then brought together. The * positions are copied back to `hStemHintArray', so we can * discard `counterMask' and `counterHintMap'. * */ CF2_HintMapRec counterHintMap; CF2_HintMaskRec counterMask; cf2_hintmap_init( &counterHintMap, font, &glyphPath.initialHintMap, &glyphPath.hintMoves, scaleY ); cf2_hintmask_init( &counterMask, error ); cf2_hintmask_read( &counterMask, charstring, cf2_arrstack_size( &hStemHintArray ) + cf2_arrstack_size( &vStemHintArray ) ); cf2_hintmap_build( &counterHintMap, &hStemHintArray, &vStemHintArray, &counterMask, 0, FALSE ); } break; case cf2_cmdRMOVETO: FT_TRACE4(( " rmoveto\n" )); if ( cf2_stack_count( opStack ) > 2 && !haveWidth ) *width = cf2_stack_getReal( opStack, 0 ) + nominalWidthX; /* width is defined or default after this */ haveWidth = TRUE; if ( font->decoder->width_only ) goto exit; curY += cf2_stack_popFixed( opStack ); curX += cf2_stack_popFixed( opStack ); cf2_glyphpath_moveTo( &glyphPath, curX, curY ); break; case cf2_cmdHMOVETO: FT_TRACE4(( " hmoveto\n" )); if ( cf2_stack_count( opStack ) > 1 && !haveWidth ) *width = cf2_stack_getReal( opStack, 0 ) + nominalWidthX; /* width is defined or default after this */ haveWidth = TRUE; if ( font->decoder->width_only ) goto exit; curX += cf2_stack_popFixed( opStack ); cf2_glyphpath_moveTo( &glyphPath, curX, curY ); break; case cf2_cmdRLINECURVE: { CF2_UInt count = cf2_stack_count( opStack ); CF2_UInt index = 0; FT_TRACE4(( " rlinecurve\n" )); while ( index + 6 < count ) { curX += cf2_stack_getReal( opStack, index + 0 ); curY += cf2_stack_getReal( opStack, index + 1 ); cf2_glyphpath_lineTo( &glyphPath, curX, curY ); index += 2; } while ( index < count ) { CF2_Fixed x1 = cf2_stack_getReal( opStack, index + 0 ) + curX; CF2_Fixed y1 = cf2_stack_getReal( opStack, index + 1 ) + curY; CF2_Fixed x2 = cf2_stack_getReal( opStack, index + 2 ) + x1; CF2_Fixed y2 = cf2_stack_getReal( opStack, index + 3 ) + y1; CF2_Fixed x3 = cf2_stack_getReal( opStack, index + 4 ) + x2; CF2_Fixed y3 = cf2_stack_getReal( opStack, index + 5 ) + y2; cf2_glyphpath_curveTo( &glyphPath, x1, y1, x2, y2, x3, y3 ); curX = x3; curY = y3; index += 6; } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdVVCURVETO: { CF2_UInt count = cf2_stack_count( opStack ); CF2_UInt index = 0; FT_TRACE4(( " vvcurveto\n" )); while ( index < count ) { CF2_Fixed x1, y1, x2, y2, x3, y3; if ( ( count - index ) & 1 ) { x1 = cf2_stack_getReal( opStack, index ) + curX; ++index; } else x1 = curX; y1 = cf2_stack_getReal( opStack, index + 0 
) + curY; x2 = cf2_stack_getReal( opStack, index + 1 ) + x1; y2 = cf2_stack_getReal( opStack, index + 2 ) + y1; x3 = x2; y3 = cf2_stack_getReal( opStack, index + 3 ) + y2; cf2_glyphpath_curveTo( &glyphPath, x1, y1, x2, y2, x3, y3 ); curX = x3; curY = y3; index += 4; } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdHHCURVETO: { CF2_UInt count = cf2_stack_count( opStack ); CF2_UInt index = 0; FT_TRACE4(( " hhcurveto\n" )); while ( index < count ) { CF2_Fixed x1, y1, x2, y2, x3, y3; if ( ( count - index ) & 1 ) { y1 = cf2_stack_getReal( opStack, index ) + curY; ++index; } else y1 = curY; x1 = cf2_stack_getReal( opStack, index + 0 ) + curX; x2 = cf2_stack_getReal( opStack, index + 1 ) + x1; y2 = cf2_stack_getReal( opStack, index + 2 ) + y1; x3 = cf2_stack_getReal( opStack, index + 3 ) + x2; y3 = y2; cf2_glyphpath_curveTo( &glyphPath, x1, y1, x2, y2, x3, y3 ); curX = x3; curY = y3; index += 4; } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdVHCURVETO: case cf2_cmdHVCURVETO: { CF2_UInt count = cf2_stack_count( opStack ); CF2_UInt index = 0; FT_Bool alternate = op1 == cf2_cmdHVCURVETO; FT_TRACE4(( alternate ? " hvcurveto\n" : " vhcurveto\n" )); while ( index < count ) { CF2_Fixed x1, x2, x3, y1, y2, y3; if ( alternate ) { x1 = cf2_stack_getReal( opStack, index + 0 ) + curX; y1 = curY; x2 = cf2_stack_getReal( opStack, index + 1 ) + x1; y2 = cf2_stack_getReal( opStack, index + 2 ) + y1; y3 = cf2_stack_getReal( opStack, index + 3 ) + y2; if ( count - index == 5 ) { x3 = cf2_stack_getReal( opStack, index + 4 ) + x2; ++index; } else x3 = x2; alternate = FALSE; } else { x1 = curX; y1 = cf2_stack_getReal( opStack, index + 0 ) + curY; x2 = cf2_stack_getReal( opStack, index + 1 ) + x1; y2 = cf2_stack_getReal( opStack, index + 2 ) + y1; x3 = cf2_stack_getReal( opStack, index + 3 ) + x2; if ( count - index == 5 ) { y3 = cf2_stack_getReal( opStack, index + 4 ) + y2; ++index; } else y3 = y2; alternate = TRUE; } cf2_glyphpath_curveTo( &glyphPath, x1, y1, x2, y2, x3, y3 ); curX = x3; curY = y3; index += 4; } cf2_stack_clear( opStack ); } continue; /* no need to clear stack again */ case cf2_cmdEXTENDEDNMBR: { CF2_Int v; v = (FT_Short)( ( cf2_buf_readByte( charstring ) << 8 ) | cf2_buf_readByte( charstring ) ); FT_TRACE4(( " %d", v )); cf2_stack_pushInt( opStack, v ); } continue; default: /* numbers */ { if ( /* op1 >= 32 && */ op1 <= 246 ) { CF2_Int v; v = op1 - 139; FT_TRACE4(( " %d", v )); /* -107 .. 107 */ cf2_stack_pushInt( opStack, v ); } else if ( /* op1 >= 247 && */ op1 <= 250 ) { CF2_Int v; v = op1; v -= 247; v *= 256; v += cf2_buf_readByte( charstring ); v += 108; FT_TRACE4(( " %d", v )); /* 108 .. 1131 */ cf2_stack_pushInt( opStack, v ); } else if ( /* op1 >= 251 && */ op1 <= 254 ) { CF2_Int v; v = op1; v -= 251; v *= 256; v += cf2_buf_readByte( charstring ); v = -v - 108; FT_TRACE4(( " %d", v )); /* -1131 .. 
-108 */ cf2_stack_pushInt( opStack, v ); } else /* op1 == 255 */ { CF2_Fixed v; v = (CF2_Fixed) ( ( (FT_UInt32)cf2_buf_readByte( charstring ) << 24 ) | ( (FT_UInt32)cf2_buf_readByte( charstring ) << 16 ) | ( (FT_UInt32)cf2_buf_readByte( charstring ) << 8 ) | (FT_UInt32)cf2_buf_readByte( charstring ) ); FT_TRACE4(( " %.2f", v / 65536.0 )); cf2_stack_pushFixed( opStack, v ); } } continue; /* don't clear stack */ } /* end of switch statement checking `op1' */ cf2_stack_clear( opStack ); } /* end of main interpreter loop */ /* we get here if the charstring ends without cf2_cmdENDCHAR */ FT_TRACE4(( "cf2_interpT2CharString:" " charstring ends without ENDCHAR\n" )); exit: /* check whether last error seen is also the first one */ cf2_setError( error, lastError ); /* free resources from objects we've used */ cf2_glyphpath_finalize( &glyphPath ); cf2_arrstack_finalize( &vStemHintArray ); cf2_arrstack_finalize( &hStemHintArray ); cf2_arrstack_finalize( &subrStack ); cf2_stack_free( opStack ); FT_TRACE4(( "\n" )); return; }
164,864
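The fixed interpreter above protects itself in two ways that are easy to miss in the flattened listing: subroutine calls are refused once charstringIndex exceeds CF2_MAX_SUBR, and an accent composition started inside another one is rejected through the doingSeac flag. A minimal C++ sketch of those two guards, with made-up types and limits rather than the FreeType ones:

#include <cstdio>

constexpr int kMaxCallDepth = 10;

struct InterpState {
  int call_depth = 0;
  bool doing_seac = false;
};

bool call_subr(InterpState& st) {
  if (st.call_depth >= kMaxCallDepth)
    return false;                 // refuse to overflow the subroutine stack
  ++st.call_depth;
  // ... interpret the subroutine body here ...
  --st.call_depth;
  return true;
}

bool run_seac(InterpState& st) {
  if (st.doing_seac)
    return false;                 // a seac inside a seac is malformed input
  st.doing_seac = true;
  // ... interpret base glyph and accent glyph as separate charstrings ...
  st.doing_seac = false;
  return true;
}

int main() {
  InterpState st;
  std::printf("subr ok: %d, seac ok: %d\n", call_subr(st), run_seac(st));
}

The point of both guards is the same: depth and re-entry are bounded before any state is pushed, so malformed charstrings fail cleanly instead of exhausting the interpreter's stacks.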
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void FrameImpl::GoForward() { NOTIMPLEMENTED(); } Commit Message: [fuchsia] Implement browser tests for WebRunner Context service. Tests may interact with the WebRunner FIDL services and the underlying browser objects for end to end testing of service and browser functionality. * Add a browser test launcher main() for WebRunner. * Add some simple navigation tests. * Wire up GoBack()/GoForward() FIDL calls. * Add embedded test server resources and initialization logic. * Add missing deletion & notification calls to BrowserContext dtor. * Use FIDL events for navigation state changes. * Bug fixes: ** Move BrowserContext and Screen deletion to PostMainMessageLoopRun(), so that they may use the MessageLoop during teardown. ** Fix Frame dtor to allow for null WindowTreeHosts (headless case) ** Fix std::move logic in Frame ctor which lead to no WebContents observer being registered. Bug: 871594 Change-Id: I36bcbd2436d534d366c6be4eeb54b9f9feadd1ac Reviewed-on: https://chromium-review.googlesource.com/1164539 Commit-Queue: Kevin Marshall <[email protected]> Reviewed-by: Wez <[email protected]> Reviewed-by: Fabrice de Gans-Riberi <[email protected]> Reviewed-by: Scott Violet <[email protected]> Cr-Commit-Position: refs/heads/master@{#584155} CWE ID: CWE-264
void FrameImpl::GoForward() { if (web_contents_->GetController().CanGoForward()) web_contents_->GetController().GoForward(); }
172,154
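The one-line fix is a guard: the forward-navigation request becomes a no-op unless CanGoForward() reports a forward entry. A self-contained sketch of the same pattern, with a hypothetical History class standing in for Chromium's NavigationController:

#include <cstddef>
#include <string>
#include <vector>

class History {
 public:
  void Visit(const std::string& url) {
    entries_.resize(index_ + 1);       // visiting drops any forward entries
    entries_.push_back(url);
    ++index_;
  }
  bool CanGoBack() const { return index_ > 0; }
  bool CanGoForward() const { return index_ + 1 < entries_.size(); }
  void GoBack()    { if (CanGoBack())    --index_; }
  void GoForward() { if (CanGoForward()) ++index_; }  // guarded, never out of range
  const std::string& Current() const { return entries_[index_]; }

 private:
  std::vector<std::string> entries_{"about:blank"};
  std::size_t index_ = 0;
};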
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ObjectBackedNativeHandler::Router( const v8::FunctionCallbackInfo<v8::Value>& args) { v8::Isolate* isolate = args.GetIsolate(); v8::HandleScope handle_scope(isolate); v8::Local<v8::Object> data = args.Data().As<v8::Object>(); v8::Local<v8::Context> context = isolate->GetCurrentContext(); v8::Local<v8::Value> handler_function_value; v8::Local<v8::Value> feature_name_value; if (!GetPrivate(context, data, kHandlerFunction, &handler_function_value) || handler_function_value->IsUndefined() || !GetPrivate(context, data, kFeatureName, &feature_name_value) || !feature_name_value->IsString()) { ScriptContext* script_context = ScriptContextSet::GetContextByV8Context(context); console::Error(script_context ? script_context->GetRenderFrame() : nullptr, "Extension view no longer exists"); return; } if (content::WorkerThread::GetCurrentId() == 0) { ScriptContext* script_context = ScriptContextSet::GetContextByV8Context(context); v8::Local<v8::String> feature_name_string = feature_name_value->ToString(context).ToLocalChecked(); std::string feature_name = *v8::String::Utf8Value(feature_name_string); if (script_context && !feature_name.empty() && !script_context->GetAvailability(feature_name).is_available()) { return; } } CHECK(handler_function_value->IsExternal()); static_cast<HandlerFunction*>( handler_function_value.As<v8::External>()->Value())->Run(args); v8::ReturnValue<v8::Value> ret = args.GetReturnValue(); v8::Local<v8::Value> ret_value = ret.Get(); if (ret_value->IsObject() && !ret_value->IsNull() && !ContextCanAccessObject(context, v8::Local<v8::Object>::Cast(ret_value), true)) { NOTREACHED() << "Insecure return value"; ret.SetUndefined(); } } Commit Message: [Extensions] Expand bindings access checks BUG=601149 BUG=601073 Review URL: https://codereview.chromium.org/1866103002 Cr-Commit-Position: refs/heads/master@{#387710} CWE ID: CWE-284
void ObjectBackedNativeHandler::Router( const v8::FunctionCallbackInfo<v8::Value>& args) { v8::Isolate* isolate = args.GetIsolate(); v8::HandleScope handle_scope(isolate); v8::Local<v8::Object> data = args.Data().As<v8::Object>(); v8::Local<v8::Context> context = isolate->GetCurrentContext(); v8::Local<v8::Value> handler_function_value; v8::Local<v8::Value> feature_name_value; if (!GetPrivate(context, data, kHandlerFunction, &handler_function_value) || handler_function_value->IsUndefined() || !GetPrivate(context, data, kFeatureName, &feature_name_value) || !feature_name_value->IsString()) { ScriptContext* script_context = ScriptContextSet::GetContextByV8Context(context); console::Error(script_context ? script_context->GetRenderFrame() : nullptr, "Extension view no longer exists"); return; } if (content::WorkerThread::GetCurrentId() == 0) { ScriptContext* script_context = ScriptContextSet::GetContextByV8Context(context); v8::Local<v8::String> feature_name_string = feature_name_value->ToString(context).ToLocalChecked(); std::string feature_name = *v8::String::Utf8Value(feature_name_string); if (script_context && !feature_name.empty()) { Feature::Availability availability = script_context->GetAvailability(feature_name); if (!availability.is_available()) { DVLOG(1) << feature_name << " is not available: " << availability.message(); return; } } } CHECK(handler_function_value->IsExternal()); static_cast<HandlerFunction*>( handler_function_value.As<v8::External>()->Value())->Run(args); v8::ReturnValue<v8::Value> ret = args.GetReturnValue(); v8::Local<v8::Value> ret_value = ret.Get(); if (ret_value->IsObject() && !ret_value->IsNull() && !ContextCanAccessObject(context, v8::Local<v8::Object>::Cast(ret_value), true)) { NOTREACHED() << "Insecure return value"; ret.SetUndefined(); } }
172,251
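The patched router refuses to dispatch when the named feature is not available to the calling context, and logs why. A rough stand-in for that gate, using a plain feature table and std::function handlers instead of the extensions bindings machinery:

#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

struct Availability {
  bool is_available;
  std::string message;   // reason, useful for logging when unavailable
};

using Handler = std::function<void()>;

std::unordered_map<std::string, Availability> g_features;
std::unordered_map<std::string, Handler> g_handlers;

void Route(const std::string& feature_name) {
  auto f = g_features.find(feature_name);
  if (f == g_features.end() || !f->second.is_available) {
    std::cerr << feature_name << " is not available: "
              << (f == g_features.end() ? "unknown feature" : f->second.message)
              << "\n";
    return;                       // never reach the privileged handler
  }
  auto h = g_handlers.find(feature_name);
  if (h != g_handlers.end()) h->second();
}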
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ext2_xattr_delete_inode(struct inode *inode) { struct buffer_head *bh = NULL; struct mb_cache_entry *ce; down_write(&EXT2_I(inode)->xattr_sem); if (!EXT2_I(inode)->i_file_acl) goto cleanup; bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl); if (!bh) { ext2_error(inode->i_sb, "ext2_xattr_delete_inode", "inode %ld: block %d read error", inode->i_ino, EXT2_I(inode)->i_file_acl); goto cleanup; } ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count))); if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) || HDR(bh)->h_blocks != cpu_to_le32(1)) { ext2_error(inode->i_sb, "ext2_xattr_delete_inode", "inode %ld: bad block %d", inode->i_ino, EXT2_I(inode)->i_file_acl); goto cleanup; } ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev, bh->b_blocknr); lock_buffer(bh); if (HDR(bh)->h_refcount == cpu_to_le32(1)) { if (ce) mb_cache_entry_free(ce); ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1); get_bh(bh); bforget(bh); unlock_buffer(bh); } else { le32_add_cpu(&HDR(bh)->h_refcount, -1); if (ce) mb_cache_entry_release(ce); ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount)); unlock_buffer(bh); mark_buffer_dirty(bh); if (IS_SYNC(inode)) sync_dirty_buffer(bh); dquot_free_block_nodirty(inode, 1); } EXT2_I(inode)->i_file_acl = 0; cleanup: brelse(bh); up_write(&EXT2_I(inode)->xattr_sem); } Commit Message: ext2: convert to mbcache2 The conversion is generally straightforward. We convert filesystem from a global cache to per-fs one. Similarly to ext4 the tricky part is that xattr block corresponding to found mbcache entry can get freed before we get buffer lock for that block. So we have to check whether the entry is still valid after getting the buffer lock. Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]> CWE ID: CWE-19
ext2_xattr_delete_inode(struct inode *inode) { struct buffer_head *bh = NULL; down_write(&EXT2_I(inode)->xattr_sem); if (!EXT2_I(inode)->i_file_acl) goto cleanup; bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl); if (!bh) { ext2_error(inode->i_sb, "ext2_xattr_delete_inode", "inode %ld: block %d read error", inode->i_ino, EXT2_I(inode)->i_file_acl); goto cleanup; } ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count))); if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) || HDR(bh)->h_blocks != cpu_to_le32(1)) { ext2_error(inode->i_sb, "ext2_xattr_delete_inode", "inode %ld: bad block %d", inode->i_ino, EXT2_I(inode)->i_file_acl); goto cleanup; } lock_buffer(bh); if (HDR(bh)->h_refcount == cpu_to_le32(1)) { __u32 hash = le32_to_cpu(HDR(bh)->h_hash); /* * This must happen under buffer lock for ext2_xattr_set2() to * reliably detect freed block */ mb2_cache_entry_delete_block(EXT2_SB(inode->i_sb)->s_mb_cache, hash, bh->b_blocknr); ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1); get_bh(bh); bforget(bh); unlock_buffer(bh); } else { le32_add_cpu(&HDR(bh)->h_refcount, -1); ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount)); unlock_buffer(bh); mark_buffer_dirty(bh); if (IS_SYNC(inode)) sync_dirty_buffer(bh); dquot_free_block_nodirty(inode, 1); } EXT2_I(inode)->i_file_acl = 0; cleanup: brelse(bh); up_write(&EXT2_I(inode)->xattr_sem); }
169,979
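The comment the fix adds ("This must happen under buffer lock for ext2_xattr_set2() to reliably detect freed block") is the core of the change: the shared cache entry is removed while the block's own lock is held, so a concurrent reuser that raced on the lookup re-validates under that same lock. A toy model of that ordering, assuming blocks are never deallocated (the real code manages lifetime through buffer references) and using std::mutex in place of the kernel's mbcache API:

#include <mutex>
#include <unordered_map>

struct Block {
  std::mutex lock;
  bool freed = false;
  unsigned refcount = 1;
};

std::mutex g_cache_lock;
std::unordered_map<unsigned, Block*> g_cache;   // content hash -> shared block

void release_block(unsigned hash, Block* b) {
  std::lock_guard<std::mutex> block_guard(b->lock);
  if (b->refcount == 1) {
    std::lock_guard<std::mutex> cache_guard(g_cache_lock);
    g_cache.erase(hash);          // removed before the block lock is dropped
    b->freed = true;
  } else {
    --b->refcount;
  }
}

bool try_share(unsigned hash) {
  Block* b = nullptr;
  {
    std::lock_guard<std::mutex> cache_guard(g_cache_lock);
    auto it = g_cache.find(hash);
    if (it == g_cache.end()) return false;
    b = it->second;
  }
  std::lock_guard<std::mutex> block_guard(b->lock);
  if (b->freed) return false;     // entry went stale between the two locks
  ++b->refcount;
  return true;
}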
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void Chapters::Display::Init() { m_string = NULL; m_language = NULL; m_country = NULL; } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
void Chapters::Display::Init() { m_string = NULL; m_language = NULL; m_country = NULL; }
174,388
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: char* _single_string_alloc_and_copy( LPCWSTR in ) { char *chr; int len = 0; if ( !in ) { return in; } while ( in[ len ] != 0 ) { len ++; } chr = malloc( len + 1 ); len = 0; while ( in[ len ] != 0 ) { chr[ len ] = 0xFF & in[ len ]; len ++; } chr[ len ++ ] = '\0'; return chr; } Commit Message: New Pre Source CWE ID: CWE-119
char* _single_string_alloc_and_copy( LPCWSTR in ) { char *chr; int len = 0; if ( !in ) { return NULL; } while ( in[ len ] != 0 ) { len ++; } chr = malloc( len + 1 ); len = 0; while ( in[ len ] != 0 ) { chr[ len ] = 0xFF & in[ len ]; len ++; } chr[ len ++ ] = '\0'; return chr; }
169,315
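The fix replaces "return in;" with "return NULL;", so a wide-character pointer is never handed back through a char* return path. A sketch of the same conversion with that check plus a malloc failure check, keeping the deliberate low-byte truncation; wchar_t stands in for LPCWSTR here, so the character width differs from Win32:

#include <cstdlib>
#include <cwchar>

char* narrow_copy(const wchar_t* in) {
  if (in == nullptr) return nullptr;       // never return the wide pointer itself

  std::size_t len = std::wcslen(in);
  char* out = static_cast<char*>(std::malloc(len + 1));
  if (out == nullptr) return nullptr;      // don't write through a failed malloc

  for (std::size_t i = 0; i < len; ++i)
    out[i] = static_cast<char>(in[i] & 0xFF);  // intentional low-byte truncation
  out[len] = '\0';
  return out;
}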
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool IDNSpoofChecker::SafeToDisplayAsUnicode(base::StringPiece16 label, bool is_tld_ascii) { UErrorCode status = U_ZERO_ERROR; int32_t result = uspoof_check(checker_, label.data(), base::checked_cast<int32_t>(label.size()), nullptr, &status); if (U_FAILURE(status) || (result & USPOOF_ALL_CHECKS)) return false; icu::UnicodeString label_string(FALSE, label.data(), base::checked_cast<int32_t>(label.size())); if (deviation_characters_.containsSome(label_string)) return false; result &= USPOOF_RESTRICTION_LEVEL_MASK; if (result == USPOOF_ASCII) return true; if (result == USPOOF_SINGLE_SCRIPT_RESTRICTIVE && kana_letters_exceptions_.containsNone(label_string) && combining_diacritics_exceptions_.containsNone(label_string)) { return !is_tld_ascii || !IsMadeOfLatinAlikeCyrillic(label_string); } if (non_ascii_latin_letters_.containsSome(label_string) && !lgc_letters_n_ascii_.containsAll(label_string)) return false; icu::RegexMatcher* dangerous_pattern = reinterpret_cast<icu::RegexMatcher*>(DangerousPatternTLS().Get()); if (!dangerous_pattern) { dangerous_pattern = new icu::RegexMatcher( icu::UnicodeString( R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}])" R"([\u30ce\u30f3\u30bd\u30be\u4e36\u4e40\u4e41\u4e3f])" R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}]|)" R"([^\p{scx=kana}][\u30fd\u30fe]|^[\u30fd\u30fe]|)" R"(^[\p{scx=kana}]+[\u3078-\u307a][\p{scx=kana}]+$|)" R"(^[\p{scx=hira}]+[\u30d8-\u30da][\p{scx=hira}]+$|)" R"([^\p{scx=kana}\p{scx=hira}]\u30fc|^\u30fc|)" R"([a-z]\u30fb|\u30fb[a-z]|)" R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}])" R"([\u4e00\u3127])" R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}]|)" R"([^\p{scx=latn}\p{scx=grek}\p{scx=cyrl}][\u0300-\u0339]|)" R"(\u0131[\u0300-\u0339]|)" R"(\u3099|\u309A|)" R"([ijl]\u0307)", -1, US_INV), 0, status); DangerousPatternTLS().Set(dangerous_pattern); } dangerous_pattern->reset(label_string); return !dangerous_pattern->find(); } Commit Message: Restrict Latin Small Letter Thorn (U+00FE) to Icelandic domains This character (þ) can be confused with both b and p when used in a domain name. IDN spoof checker doesn't have a good way of flagging a character as confusable with multiple characters, so it can't catch spoofs containing this character. As a practical fix, this CL restricts this character to domains under Iceland's ccTLD (.is). With this change, a domain name containing "þ" with a non-.is TLD will be displayed in punycode in the UI. This change affects less than 10 real world domains with limited popularity. Bug: 798892, 843352, 904327, 1017707 Change-Id: Ib07190dcde406bf62ce4413688a4fb4859a51030 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1879992 Commit-Queue: Mustafa Emre Acer <[email protected]> Reviewed-by: Christopher Thompson <[email protected]> Cr-Commit-Position: refs/heads/master@{#709309} CWE ID:
bool IDNSpoofChecker::SafeToDisplayAsUnicode(base::StringPiece16 label, bool IDNSpoofChecker::SafeToDisplayAsUnicode( base::StringPiece16 label, base::StringPiece top_level_domain) { UErrorCode status = U_ZERO_ERROR; int32_t result = uspoof_check(checker_, label.data(), base::checked_cast<int32_t>(label.size()), nullptr, &status); if (U_FAILURE(status) || (result & USPOOF_ALL_CHECKS)) return false; icu::UnicodeString label_string(FALSE, label.data(), base::checked_cast<int32_t>(label.size())); if (deviation_characters_.containsSome(label_string)) return false; // Latin small letter thorn (U+00FE) can be used to spoof both b and p. It's // used in modern Icelandic orthography, so allow it for the Icelandic ccTLD // (.is) but block in any other TLD. if (label_string.length() > 1 && label_string.indexOf("þ") != -1 && top_level_domain != ".is") { return false; } result &= USPOOF_RESTRICTION_LEVEL_MASK; if (result == USPOOF_ASCII) return true; if (result == USPOOF_SINGLE_SCRIPT_RESTRICTIVE && kana_letters_exceptions_.containsNone(label_string) && combining_diacritics_exceptions_.containsNone(label_string)) { bool is_tld_ascii = !top_level_domain.starts_with(".xn--"); return !is_tld_ascii || !IsMadeOfLatinAlikeCyrillic(label_string); } if (non_ascii_latin_letters_.containsSome(label_string) && !lgc_letters_n_ascii_.containsAll(label_string)) return false; icu::RegexMatcher* dangerous_pattern = reinterpret_cast<icu::RegexMatcher*>(DangerousPatternTLS().Get()); if (!dangerous_pattern) { dangerous_pattern = new icu::RegexMatcher( icu::UnicodeString( R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}])" R"([\u30ce\u30f3\u30bd\u30be\u4e36\u4e40\u4e41\u4e3f])" R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}]|)" R"([^\p{scx=kana}][\u30fd\u30fe]|^[\u30fd\u30fe]|)" R"(^[\p{scx=kana}]+[\u3078-\u307a][\p{scx=kana}]+$|)" R"(^[\p{scx=hira}]+[\u30d8-\u30da][\p{scx=hira}]+$|)" R"([^\p{scx=kana}\p{scx=hira}]\u30fc|^\u30fc|)" R"([a-z]\u30fb|\u30fb[a-z]|)" R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}])" R"([\u4e00\u3127])" R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}]|)" R"([^\p{scx=latn}\p{scx=grek}\p{scx=cyrl}][\u0300-\u0339]|)" R"(\u0131[\u0300-\u0339]|)" R"(\u3099|\u309A|)" R"([ijl]\u0307)", -1, US_INV), 0, status); DangerousPatternTLS().Set(dangerous_pattern); } dangerous_pattern->reset(label_string); return !dangerous_pattern->find(); }
172,727
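The added early return blocks Latin small letter thorn (U+00FE) unless the TLD is .is. A simplified stand-in that scans UTF-8 host strings (thorn encodes as the byte pair 0xC3 0xBE) instead of using ICU's UnicodeString, just to show the shape of a TLD-conditional character restriction:

#include <cstddef>
#include <string>

bool ends_with(const std::string& s, const std::string& suffix) {
  return s.size() >= suffix.size() &&
         s.compare(s.size() - suffix.size(), suffix.size(), suffix) == 0;
}

bool contains_thorn_utf8(const std::string& label) {
  for (std::size_t i = 0; i + 1 < label.size(); ++i)
    if (static_cast<unsigned char>(label[i]) == 0xC3 &&
        static_cast<unsigned char>(label[i + 1]) == 0xBE)
      return true;
  return false;
}

bool thorn_allowed(const std::string& label, const std::string& host) {
  if (!contains_thorn_utf8(label)) return true;   // nothing to restrict
  return ends_with(host, ".is");                  // thorn only under Iceland's ccTLD
}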
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: IndexedDBTransaction::IndexedDBTransaction( int64_t id, IndexedDBConnection* connection, const std::set<int64_t>& object_store_ids, blink::WebIDBTransactionMode mode, IndexedDBBackingStore::Transaction* backing_store_transaction) : id_(id), object_store_ids_(object_store_ids), mode_(mode), connection_(connection), transaction_(backing_store_transaction), ptr_factory_(this) { IDB_ASYNC_TRACE_BEGIN("IndexedDBTransaction::lifetime", this); callbacks_ = connection_->callbacks(); database_ = connection_->database(); diagnostics_.tasks_scheduled = 0; diagnostics_.tasks_completed = 0; diagnostics_.creation_time = base::Time::Now(); } Commit Message: [IndexedDB] Fixing early destruction of connection during forceclose Patch is as small as possible for merging. Bug: 842990 Change-Id: I9968ffee1bf3279e61e1ec13e4d541f713caf12f Reviewed-on: https://chromium-review.googlesource.com/1062935 Commit-Queue: Daniel Murphy <[email protected]> Commit-Queue: Victor Costan <[email protected]> Reviewed-by: Victor Costan <[email protected]> Cr-Commit-Position: refs/heads/master@{#559383} CWE ID:
IndexedDBTransaction::IndexedDBTransaction( int64_t id, IndexedDBConnection* connection, const std::set<int64_t>& object_store_ids, blink::WebIDBTransactionMode mode, IndexedDBBackingStore::Transaction* backing_store_transaction) : id_(id), object_store_ids_(object_store_ids), mode_(mode), connection_(connection->GetWeakPtr()), transaction_(backing_store_transaction), ptr_factory_(this) { IDB_ASYNC_TRACE_BEGIN("IndexedDBTransaction::lifetime", this); callbacks_ = connection_->callbacks(); database_ = connection_->database(); diagnostics_.tasks_scheduled = 0; diagnostics_.tasks_completed = 0; diagnostics_.creation_time = base::Time::Now(); }
173,220
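The only change in the constructor is storing connection->GetWeakPtr() rather than the raw pointer, so later accesses can check liveness first. A sketch of that ownership shape using std::weak_ptr as a stand-in for base::WeakPtr, with illustrative class names:

#include <iostream>
#include <memory>

struct Connection {
  void Abort() { std::cout << "connection aborted\n"; }
};

class Transaction {
 public:
  explicit Transaction(std::weak_ptr<Connection> connection)
      : connection_(std::move(connection)) {}

  void AbortThroughConnection() {
    if (auto c = connection_.lock())   // only touch the connection if still alive
      c->Abort();
    else
      std::cout << "connection already gone, nothing to do\n";
  }

 private:
  std::weak_ptr<Connection> connection_;
};

int main() {
  auto conn = std::make_shared<Connection>();
  Transaction txn{conn};
  txn.AbortThroughConnection();   // connection aborted
  conn.reset();                   // simulate a force-close destroying it early
  txn.AbortThroughConnection();   // connection already gone, nothing to do
}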
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void *load_device_tree(const char *filename_path, int *sizep) { int dt_size; int dt_file_load_size; int ret; void *fdt = NULL; *sizep = 0; dt_size = get_image_size(filename_path); if (dt_size < 0) { error_report("Unable to get size of device tree file '%s'", filename_path); goto fail; } /* Expand to 2x size to give enough room for manipulation. */ dt_size += 10000; dt_size *= 2; /* First allocate space in qemu for device tree */ fdt = g_malloc0(dt_size); dt_file_load_size = load_image(filename_path, fdt); if (dt_file_load_size < 0) { error_report("Unable to open device tree file '%s'", filename_path); goto fail; } ret = fdt_open_into(fdt, fdt, dt_size); if (ret) { error_report("Unable to copy device tree in memory"); goto fail; } /* Check sanity of device tree */ if (fdt_check_header(fdt)) { error_report("Device tree file loaded into memory is invalid: %s", filename_path); goto fail; } *sizep = dt_size; return fdt; fail: g_free(fdt); return NULL; } Commit Message: CWE ID: CWE-119
void *load_device_tree(const char *filename_path, int *sizep) { int dt_size; int dt_file_load_size; int ret; void *fdt = NULL; *sizep = 0; dt_size = get_image_size(filename_path); if (dt_size < 0) { error_report("Unable to get size of device tree file '%s'", filename_path); goto fail; } /* Expand to 2x size to give enough room for manipulation. */ dt_size += 10000; dt_size *= 2; /* First allocate space in qemu for device tree */ fdt = g_malloc0(dt_size); dt_file_load_size = load_image_size(filename_path, fdt, dt_size); if (dt_file_load_size < 0) { error_report("Unable to open device tree file '%s'", filename_path); goto fail; } ret = fdt_open_into(fdt, fdt, dt_size); if (ret) { error_report("Unable to copy device tree in memory"); goto fail; } /* Check sanity of device tree */ if (fdt_check_header(fdt)) { error_report("Device tree file loaded into memory is invalid: %s", filename_path); goto fail; } *sizep = dt_size; return fdt; fail: g_free(fdt); return NULL; }
165,222
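The fix swaps load_image() for load_image_size(), capping the copy at the dt_size that was actually allocated. A generic sketch of a size-capped file load using only standard I/O, not the QEMU loader API:

#include <cstdio>
#include <vector>

// Returns the number of bytes read, or -1 if the file does not fit in buf.
long load_file_capped(const char* path, std::vector<unsigned char>& buf) {
  std::FILE* f = std::fopen(path, "rb");
  if (!f) return -1;

  std::size_t n = std::fread(buf.data(), 1, buf.size(), f);
  int extra = std::fgetc(f);          // anything left means the file is too big
  std::fclose(f);

  if (extra != EOF) return -1;        // refuse to truncate or overflow silently
  return static_cast<long>(n);
}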
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static char *print_array( cJSON *item, int depth, int fmt ) { char **entries; char *out = 0, *ptr, *ret; int len = 5; cJSON *child = item->child; int numentries = 0, i = 0, fail = 0; /* How many entries in the array? */ while ( child ) { ++numentries; child = child->next; } /* Allocate an array to hold the values for each. */ if ( ! ( entries = (char**) cJSON_malloc( numentries * sizeof(char*) ) ) ) return 0; memset( entries, 0, numentries * sizeof(char*) ); /* Retrieve all the results. */ child = item->child; while ( child && ! fail ) { ret = print_value( child, depth + 1, fmt ); entries[i++] = ret; if ( ret ) len += strlen( ret ) + 2 + ( fmt ? 1 : 0 ); else fail = 1; child = child -> next; } /* If we didn't fail, try to malloc the output string. */ if ( ! fail ) { out = (char*) cJSON_malloc( len ); if ( ! out ) fail = 1; } /* Handle failure. */ if ( fail ) { for ( i = 0; i < numentries; ++i ) if ( entries[i] ) cJSON_free( entries[i] ); cJSON_free( entries ); return 0; } /* Compose the output array. */ *out = '['; ptr = out + 1; *ptr = 0; for ( i = 0; i < numentries; ++i ) { strcpy( ptr, entries[i] ); ptr += strlen( entries[i] ); if ( i != numentries - 1 ) { *ptr++ = ','; if ( fmt ) *ptr++ = ' '; *ptr = 0; } cJSON_free( entries[i] ); } cJSON_free( entries ); *ptr++ = ']'; *ptr++ = 0; return out; } Commit Message: Fix a buffer overflow / heap corruption issue that could occur if a malformed JSON string was passed on the control channel. This issue, present in the cJSON library, was already fixed upstream, so was addressed here in iperf3 by importing a newer version of cJSON (plus local ESnet modifications). Discovered and reported by Dave McDaniel, Cisco Talos. Based on a patch by @dopheide-esnet, with input from @DaveGamble. Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001, CVE-2016-4303 (cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40) Signed-off-by: Bruce A. Mah <[email protected]> CWE ID: CWE-119
static char *print_array( cJSON *item, int depth, int fmt ) static char *print_array(cJSON *item,int depth,int fmt,printbuffer *p) { char **entries; char *out=0,*ptr,*ret;int len=5; cJSON *child=item->child; int numentries=0,i=0,fail=0; size_t tmplen=0; /* How many entries in the array? */ while (child) numentries++,child=child->next; /* Explicitly handle numentries==0 */ if (!numentries) { if (p) out=ensure(p,3); else out=(char*)cJSON_malloc(3); if (out) strcpy(out,"[]"); return out; } if (p) { /* Compose the output array. */ i=p->offset; ptr=ensure(p,1);if (!ptr) return 0; *ptr='['; p->offset++; child=item->child; while (child && !fail) { print_value(child,depth+1,fmt,p); p->offset=update(p); if (child->next) {len=fmt?2:1;ptr=ensure(p,len+1);if (!ptr) return 0;*ptr++=',';if(fmt)*ptr++=' ';*ptr=0;p->offset+=len;} child=child->next; } ptr=ensure(p,2);if (!ptr) return 0; *ptr++=']';*ptr=0; out=(p->buffer)+i; } else { /* Allocate an array to hold the values for each */ entries=(char**)cJSON_malloc(numentries*sizeof(char*)); if (!entries) return 0; memset(entries,0,numentries*sizeof(char*)); /* Retrieve all the results: */ child=item->child; while (child && !fail) { ret=print_value(child,depth+1,fmt,0); entries[i++]=ret; if (ret) len+=strlen(ret)+2+(fmt?1:0); else fail=1; child=child->next; } /* If we didn't fail, try to malloc the output string */ if (!fail) out=(char*)cJSON_malloc(len); /* If that fails, we fail. */ if (!out) fail=1; /* Handle failure. */ if (fail) { for (i=0;i<numentries;i++) if (entries[i]) cJSON_free(entries[i]); cJSON_free(entries); return 0; } /* Compose the output array. */ *out='['; ptr=out+1;*ptr=0; for (i=0;i<numentries;i++) { tmplen=strlen(entries[i]);memcpy(ptr,entries[i],tmplen);ptr+=tmplen; if (i!=numentries-1) {*ptr++=',';if(fmt)*ptr++=' ';*ptr=0;} cJSON_free(entries[i]); } cJSON_free(entries); *ptr++=']';*ptr++=0; } return out; }
167,306
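The patched printer threads a printbuffer through every call and asks ensure() for capacity before each write, so the buffer grows on demand instead of relying on an up-front length estimate (the estimate going wrong is what overflowed). A simplified ensure-style buffer, not the cJSON printbuffer itself:

#include <cstring>
#include <string>
#include <vector>

struct PrintBuffer {
  std::vector<char> data;
  std::size_t offset = 0;

  char* ensure(std::size_t needed) {
    if (offset + needed > data.size())
      data.resize((offset + needed) * 2);   // grow geometrically
    return data.data() + offset;
  }
  void append(const char* s) {
    std::size_t len = std::strlen(s);
    std::memcpy(ensure(len + 1), s, len + 1);
    offset += len;                           // terminator stays overwritable
  }
};

std::string print_array(const std::vector<std::string>& entries) {
  PrintBuffer p;
  p.append("[");
  for (std::size_t i = 0; i < entries.size(); ++i) {
    p.append(entries[i].c_str());
    if (i + 1 < entries.size()) p.append(",");
  }
  p.append("]");
  return std::string(p.data.data(), p.offset);
}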
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: WORD32 ih264d_parse_decode_slice(UWORD8 u1_is_idr_slice, UWORD8 u1_nal_ref_idc, dec_struct_t *ps_dec /* Decoder parameters */ ) { dec_bit_stream_t * ps_bitstrm = ps_dec->ps_bitstrm; dec_pic_params_t *ps_pps; dec_seq_params_t *ps_seq; dec_slice_params_t *ps_cur_slice = ps_dec->ps_cur_slice; pocstruct_t s_tmp_poc; WORD32 i_delta_poc[2]; WORD32 i4_poc = 0; UWORD16 u2_first_mb_in_slice, u2_frame_num; UWORD8 u1_field_pic_flag, u1_redundant_pic_cnt = 0, u1_slice_type; UWORD32 u4_idr_pic_id = 0; UWORD8 u1_bottom_field_flag, u1_pic_order_cnt_type; UWORD8 u1_nal_unit_type; UWORD32 *pu4_bitstrm_buf = ps_bitstrm->pu4_buffer; UWORD32 *pu4_bitstrm_ofst = &ps_bitstrm->u4_ofst; WORD8 i1_is_end_of_poc; WORD32 ret, end_of_frame; WORD32 prev_slice_err, num_mb_skipped; UWORD8 u1_mbaff; pocstruct_t *ps_cur_poc; UWORD32 u4_temp; WORD32 i_temp; UWORD32 u4_call_end_of_pic = 0; /* read FirstMbInSlice and slice type*/ ps_dec->ps_dpb_cmds->u1_dpb_commands_read_slc = 0; u2_first_mb_in_slice = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u2_first_mb_in_slice > (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)) { return ERROR_CORRUPTED_SLICE; } /*we currently don not support ASO*/ if(((u2_first_mb_in_slice << ps_cur_slice->u1_mbaff_frame_flag) <= ps_dec->u2_cur_mb_addr) && (ps_dec->u4_first_slice_in_pic == 0)) { return ERROR_CORRUPTED_SLICE; } COPYTHECONTEXT("SH: first_mb_in_slice",u2_first_mb_in_slice); u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp > 9) return ERROR_INV_SLC_TYPE_T; u1_slice_type = u4_temp; COPYTHECONTEXT("SH: slice_type",(u1_slice_type)); ps_dec->u1_sl_typ_5_9 = 0; /* Find Out the Slice Type is 5 to 9 or not then Set the Flag */ /* u1_sl_typ_5_9 = 1 .Which tells that all the slices in the Pic*/ /* will be of same type of current */ if(u1_slice_type > 4) { u1_slice_type -= 5; ps_dec->u1_sl_typ_5_9 = 1; } { UWORD32 skip; if((ps_dec->i4_app_skip_mode == IVD_SKIP_PB) || (ps_dec->i4_dec_skip_mode == IVD_SKIP_PB)) { UWORD32 u4_bit_stream_offset = 0; if(ps_dec->u1_nal_unit_type == IDR_SLICE_NAL) { skip = 0; ps_dec->i4_dec_skip_mode = IVD_SKIP_NONE; } else if((I_SLICE == u1_slice_type) && (1 >= ps_dec->ps_cur_sps->u1_num_ref_frames)) { skip = 0; ps_dec->i4_dec_skip_mode = IVD_SKIP_NONE; } else { skip = 1; } /* If one frame worth of data is already skipped, do not skip the next one */ if((0 == u2_first_mb_in_slice) && (1 == ps_dec->u4_prev_nal_skipped)) { skip = 0; } if(skip) { ps_dec->u4_prev_nal_skipped = 1; ps_dec->i4_dec_skip_mode = IVD_SKIP_PB; return 0; } else { /* If the previous NAL was skipped, then do not process that buffer in this call. Return to app and process it in the next call. This is necessary to handle cases where I/IDR is not complete in the current buffer and application intends to fill the remaining part of the bitstream later. 
This ensures we process only frame worth of data in every call */ if(1 == ps_dec->u4_prev_nal_skipped) { ps_dec->u4_return_to_app = 1; return 0; } } } } u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp & MASK_ERR_PIC_SET_ID) return ERROR_INV_SLICE_HDR_T; /* discard slice if pic param is invalid */ COPYTHECONTEXT("SH: pic_parameter_set_id", u4_temp); ps_pps = &ps_dec->ps_pps[u4_temp]; if(FALSE == ps_pps->u1_is_valid) { return ERROR_INV_SLICE_HDR_T; } ps_seq = ps_pps->ps_sps; if(!ps_seq) return ERROR_INV_SLICE_HDR_T; if(FALSE == ps_seq->u1_is_valid) return ERROR_INV_SLICE_HDR_T; /* Get the frame num */ u2_frame_num = ih264d_get_bits_h264(ps_bitstrm, ps_seq->u1_bits_in_frm_num); COPYTHECONTEXT("SH: frame_num", u2_frame_num); if(!ps_dec->u1_first_slice_in_stream && (ps_dec->u4_first_slice_in_pic == 2)) { pocstruct_t *ps_prev_poc = &ps_dec->s_prev_pic_poc; pocstruct_t *ps_cur_poc = &ps_dec->s_cur_pic_poc; ps_dec->u2_mbx = 0xffff; ps_dec->u2_mby = 0; if((0 == u1_is_idr_slice) && ps_cur_slice->u1_nal_ref_idc) ps_dec->u2_prev_ref_frame_num = ps_cur_slice->u2_frame_num; if(u1_is_idr_slice || ps_cur_slice->u1_mmco_equalto5) ps_dec->u2_prev_ref_frame_num = 0; if(ps_dec->ps_cur_sps->u1_gaps_in_frame_num_value_allowed_flag) { ih264d_decode_gaps_in_frame_num(ps_dec, u2_frame_num); } ps_prev_poc->i4_prev_frame_num_ofst = ps_cur_poc->i4_prev_frame_num_ofst; ps_prev_poc->u2_frame_num = ps_cur_poc->u2_frame_num; ps_prev_poc->u1_mmco_equalto5 = ps_cur_slice->u1_mmco_equalto5; if(ps_cur_slice->u1_nal_ref_idc) { ps_prev_poc->i4_pic_order_cnt_lsb = ps_cur_poc->i4_pic_order_cnt_lsb; ps_prev_poc->i4_pic_order_cnt_msb = ps_cur_poc->i4_pic_order_cnt_msb; ps_prev_poc->i4_delta_pic_order_cnt_bottom = ps_cur_poc->i4_delta_pic_order_cnt_bottom; ps_prev_poc->i4_delta_pic_order_cnt[0] = ps_cur_poc->i4_delta_pic_order_cnt[0]; ps_prev_poc->i4_delta_pic_order_cnt[1] = ps_cur_poc->i4_delta_pic_order_cnt[1]; ps_prev_poc->u1_bot_field = ps_cur_poc->u1_bot_field; } ps_dec->u2_total_mbs_coded = 0; } /* Get the field related flags */ if(!ps_seq->u1_frame_mbs_only_flag) { u1_field_pic_flag = ih264d_get_bit_h264(ps_bitstrm); COPYTHECONTEXT("SH: field_pic_flag", u1_field_pic_flag); u1_bottom_field_flag = 0; if(u1_field_pic_flag) { ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan_fld; u1_bottom_field_flag = ih264d_get_bit_h264(ps_bitstrm); COPYTHECONTEXT("SH: bottom_field_flag", u1_bottom_field_flag); } else { ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan; } } else { u1_field_pic_flag = 0; u1_bottom_field_flag = 0; ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan; } u1_nal_unit_type = SLICE_NAL; if(u1_is_idr_slice) { if(0 == u1_field_pic_flag) { ps_dec->u1_top_bottom_decoded = TOP_FIELD_ONLY | BOT_FIELD_ONLY; } u1_nal_unit_type = IDR_SLICE_NAL; u4_idr_pic_id = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_idr_pic_id > 65535) return ERROR_INV_SLICE_HDR_T; COPYTHECONTEXT("SH: ", u4_idr_pic_id); } /* read delta pic order count information*/ i_delta_poc[0] = i_delta_poc[1] = 0; s_tmp_poc.i4_pic_order_cnt_lsb = 0; s_tmp_poc.i4_delta_pic_order_cnt_bottom = 0; u1_pic_order_cnt_type = ps_seq->u1_pic_order_cnt_type; if(u1_pic_order_cnt_type == 0) { i_temp = ih264d_get_bits_h264( ps_bitstrm, ps_seq->u1_log2_max_pic_order_cnt_lsb_minus); if(i_temp < 0 || i_temp >= ps_seq->i4_max_pic_order_cntLsb) return ERROR_INV_SLICE_HDR_T; s_tmp_poc.i4_pic_order_cnt_lsb = i_temp; COPYTHECONTEXT("SH: pic_order_cnt_lsb", s_tmp_poc.i4_pic_order_cnt_lsb); if((ps_pps->u1_pic_order_present_flag == 1) && 
(!u1_field_pic_flag)) { s_tmp_poc.i4_delta_pic_order_cnt_bottom = ih264d_sev( pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt_bottom", s_tmp_poc.i4_delta_pic_order_cnt_bottom); } } s_tmp_poc.i4_delta_pic_order_cnt[0] = 0; s_tmp_poc.i4_delta_pic_order_cnt[1] = 0; if(u1_pic_order_cnt_type == 1 && (!ps_seq->u1_delta_pic_order_always_zero_flag)) { s_tmp_poc.i4_delta_pic_order_cnt[0] = ih264d_sev(pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt[0]", s_tmp_poc.i4_delta_pic_order_cnt[0]); if(ps_pps->u1_pic_order_present_flag && !u1_field_pic_flag) { s_tmp_poc.i4_delta_pic_order_cnt[1] = ih264d_sev( pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt[1]", s_tmp_poc.i4_delta_pic_order_cnt[1]); } } if(ps_pps->u1_redundant_pic_cnt_present_flag) { u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp > MAX_REDUNDANT_PIC_CNT) return ERROR_INV_SLICE_HDR_T; u1_redundant_pic_cnt = u4_temp; COPYTHECONTEXT("SH: redundant_pic_cnt", u1_redundant_pic_cnt); } /*--------------------------------------------------------------------*/ /* Check if the slice is part of new picture */ /*--------------------------------------------------------------------*/ /* First slice of a picture is always considered as part of new picture */ i1_is_end_of_poc = 1; ps_dec->ps_dec_err_status->u1_err_flag &= MASK_REJECT_CUR_PIC; if(ps_dec->u4_first_slice_in_pic != 2) { i1_is_end_of_poc = ih264d_is_end_of_pic(u2_frame_num, u1_nal_ref_idc, &s_tmp_poc, &ps_dec->s_cur_pic_poc, ps_cur_slice, u1_pic_order_cnt_type, u1_nal_unit_type, u4_idr_pic_id, u1_field_pic_flag, u1_bottom_field_flag); if(i1_is_end_of_poc) { ps_dec->u1_first_slice_in_stream = 0; return ERROR_INCOMPLETE_FRAME; } } /*--------------------------------------------------------------------*/ /* Check for error in slice and parse the missing/corrupted MB's */ /* as skip-MB's in an inserted P-slice */ /*--------------------------------------------------------------------*/ u1_mbaff = ps_seq->u1_mb_aff_flag && (!u1_field_pic_flag); prev_slice_err = 0; if(i1_is_end_of_poc || ps_dec->u1_first_slice_in_stream) { if(u2_frame_num != ps_dec->u2_prv_frame_num && ps_dec->u1_top_bottom_decoded != 0 && ps_dec->u1_top_bottom_decoded != (TOP_FIELD_ONLY | BOT_FIELD_ONLY)) { ps_dec->u1_dangling_field = 1; if(ps_dec->u4_first_slice_in_pic) { prev_slice_err = 1; } else { prev_slice_err = 2; } if(ps_dec->u1_top_bottom_decoded ==TOP_FIELD_ONLY) ps_cur_slice->u1_bottom_field_flag = 1; else ps_cur_slice->u1_bottom_field_flag = 0; num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) - ps_dec->u2_total_mbs_coded; ps_cur_poc = &ps_dec->s_cur_pic_poc; u1_is_idr_slice = ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL; } else if(ps_dec->u4_first_slice_in_pic == 2) { if(u2_first_mb_in_slice > 0) { prev_slice_err = 1; num_mb_skipped = u2_first_mb_in_slice << u1_mbaff; ps_cur_poc = &s_tmp_poc; ps_cur_slice->u4_idr_pic_id = u4_idr_pic_id; ps_cur_slice->u1_field_pic_flag = u1_field_pic_flag; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_cur_slice->i4_pic_order_cnt_lsb = s_tmp_poc.i4_pic_order_cnt_lsb; ps_cur_slice->u1_nal_unit_type = u1_nal_unit_type; ps_cur_slice->u1_redundant_pic_cnt = u1_redundant_pic_cnt; ps_cur_slice->u1_nal_ref_idc = u1_nal_ref_idc; ps_cur_slice->u1_pic_order_cnt_type = u1_pic_order_cnt_type; ps_cur_slice->u1_mbaff_frame_flag = ps_seq->u1_mb_aff_flag && (!u1_field_pic_flag); } } else { if(ps_dec->u4_first_slice_in_pic) { /* if valid slice header is not decoded do 
start of pic processing * since in the current process call, frame num is not updated in the slice structure yet * ih264d_is_end_of_pic is checked with valid frame num of previous process call, * although i1_is_end_of_poc is set there could be more slices in the frame, * so conceal only till cur slice */ prev_slice_err = 1; num_mb_skipped = u2_first_mb_in_slice << u1_mbaff; } else { /* since i1_is_end_of_poc is set ,means new frame num is encountered. so conceal the current frame * completely */ prev_slice_err = 2; num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) - ps_dec->u2_total_mbs_coded; } ps_cur_poc = &s_tmp_poc; } } else { if((u2_first_mb_in_slice << u1_mbaff) > ps_dec->u2_total_mbs_coded) { prev_slice_err = 2; num_mb_skipped = (u2_first_mb_in_slice << u1_mbaff) - ps_dec->u2_total_mbs_coded; ps_cur_poc = &s_tmp_poc; } else if((u2_first_mb_in_slice << u1_mbaff) < ps_dec->u2_total_mbs_coded) { return ERROR_CORRUPTED_SLICE; } } if(prev_slice_err) { ret = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, u1_is_idr_slice, u2_frame_num, ps_cur_poc, prev_slice_err); if(ps_dec->u1_dangling_field == 1) { ps_dec->u1_second_field = 1 - ps_dec->u1_second_field; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_dec->u2_prv_frame_num = u2_frame_num; ps_dec->u1_first_slice_in_stream = 0; return ERROR_DANGLING_FIELD_IN_PIC; } if(prev_slice_err == 2) { ps_dec->u1_first_slice_in_stream = 0; return ERROR_INCOMPLETE_FRAME; } if(ps_dec->u2_total_mbs_coded >= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) { /* return if all MBs in frame are parsed*/ ps_dec->u1_first_slice_in_stream = 0; return ERROR_IN_LAST_SLICE_OF_PIC; } if(ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) { ih264d_err_pic_dispbuf_mgr(ps_dec); return ERROR_NEW_FRAME_EXPECTED; } if(ret != OK) return ret; i1_is_end_of_poc = 0; } if (ps_dec->u4_first_slice_in_pic == 0) { ps_dec->ps_parse_cur_slice++; ps_dec->u2_cur_slice_num++; } if((ps_dec->u1_separate_parse == 0) && (ps_dec->u4_first_slice_in_pic == 0)) { ps_dec->ps_decode_cur_slice++; } ps_dec->u1_slice_header_done = 0; if(u1_field_pic_flag) { ps_dec->u2_prv_frame_num = u2_frame_num; } if(ps_cur_slice->u1_mmco_equalto5) { WORD32 i4_temp_poc; WORD32 i4_top_field_order_poc, i4_bot_field_order_poc; if(!ps_cur_slice->u1_field_pic_flag) // or a complementary field pair { i4_top_field_order_poc = ps_dec->ps_cur_pic->i4_top_field_order_cnt; i4_bot_field_order_poc = ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; i4_temp_poc = MIN(i4_top_field_order_poc, i4_bot_field_order_poc); } else if(!ps_cur_slice->u1_bottom_field_flag) i4_temp_poc = ps_dec->ps_cur_pic->i4_top_field_order_cnt; else i4_temp_poc = ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; ps_dec->ps_cur_pic->i4_top_field_order_cnt = i4_temp_poc - ps_dec->ps_cur_pic->i4_top_field_order_cnt; ps_dec->ps_cur_pic->i4_bottom_field_order_cnt = i4_temp_poc - ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; ps_dec->ps_cur_pic->i4_poc = i4_temp_poc; ps_dec->ps_cur_pic->i4_avg_poc = i4_temp_poc; } if(ps_dec->u4_first_slice_in_pic == 2) { ret = ih264d_decode_pic_order_cnt(u1_is_idr_slice, u2_frame_num, &ps_dec->s_prev_pic_poc, &s_tmp_poc, ps_cur_slice, ps_pps, u1_nal_ref_idc, u1_bottom_field_flag, u1_field_pic_flag, &i4_poc); if(ret != OK) return ret; /* Display seq no calculations */ if(i4_poc >= ps_dec->i4_max_poc) ps_dec->i4_max_poc = i4_poc; /* IDR Picture or POC wrap around */ if(i4_poc == 0) { ps_dec->i4_prev_max_display_seq = ps_dec->i4_prev_max_display_seq + ps_dec->i4_max_poc + 
ps_dec->u1_max_dec_frame_buffering + 1; ps_dec->i4_max_poc = 0; } } /*--------------------------------------------------------------------*/ /* Copy the values read from the bitstream to the slice header and then*/ /* If the slice is first slice in picture, then do Start of Picture */ /* processing. */ /*--------------------------------------------------------------------*/ ps_cur_slice->i4_delta_pic_order_cnt[0] = i_delta_poc[0]; ps_cur_slice->i4_delta_pic_order_cnt[1] = i_delta_poc[1]; ps_cur_slice->u4_idr_pic_id = u4_idr_pic_id; ps_cur_slice->u2_first_mb_in_slice = u2_first_mb_in_slice; ps_cur_slice->u1_field_pic_flag = u1_field_pic_flag; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_cur_slice->u1_slice_type = u1_slice_type; ps_cur_slice->i4_pic_order_cnt_lsb = s_tmp_poc.i4_pic_order_cnt_lsb; ps_cur_slice->u1_nal_unit_type = u1_nal_unit_type; ps_cur_slice->u1_redundant_pic_cnt = u1_redundant_pic_cnt; ps_cur_slice->u1_nal_ref_idc = u1_nal_ref_idc; ps_cur_slice->u1_pic_order_cnt_type = u1_pic_order_cnt_type; if(ps_seq->u1_frame_mbs_only_flag) ps_cur_slice->u1_direct_8x8_inference_flag = ps_seq->u1_direct_8x8_inference_flag; else ps_cur_slice->u1_direct_8x8_inference_flag = 1; if(u1_slice_type == B_SLICE) { ps_cur_slice->u1_direct_spatial_mv_pred_flag = ih264d_get_bit_h264( ps_bitstrm); COPYTHECONTEXT("SH: direct_spatial_mv_pred_flag", ps_cur_slice->u1_direct_spatial_mv_pred_flag); if(ps_cur_slice->u1_direct_spatial_mv_pred_flag) ps_cur_slice->pf_decodeDirect = ih264d_decode_spatial_direct; else ps_cur_slice->pf_decodeDirect = ih264d_decode_temporal_direct; if(!((ps_pps->ps_sps->u1_mb_aff_flag) && (!u1_field_pic_flag))) ps_dec->pf_mvpred = ih264d_mvpred_nonmbaffB; } else { if(!((ps_pps->ps_sps->u1_mb_aff_flag) && (!u1_field_pic_flag))) ps_dec->pf_mvpred = ih264d_mvpred_nonmbaff; } if(ps_dec->u4_first_slice_in_pic == 2) { if(u2_first_mb_in_slice == 0) { ret = ih264d_start_of_pic(ps_dec, i4_poc, &s_tmp_poc, u2_frame_num, ps_pps); if(ret != OK) return ret; } ps_dec->u4_output_present = 0; { ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer, &(ps_dec->s_disp_op)); /* If error code is non-zero then there is no buffer available for display, hence avoid format conversion */ if(0 != ps_dec->s_disp_op.u4_error_code) { ps_dec->u4_fmt_conv_cur_row = ps_dec->s_disp_frame_info.u4_y_ht; } else ps_dec->u4_output_present = 1; } if(ps_dec->u1_separate_parse == 1) { if(ps_dec->u4_dec_thread_created == 0) { ithread_create(ps_dec->pv_dec_thread_handle, NULL, (void *)ih264d_decode_picture_thread, (void *)ps_dec); ps_dec->u4_dec_thread_created = 1; } if((ps_dec->u4_num_cores == 3) && ((ps_dec->u4_app_disable_deblk_frm == 0) || ps_dec->i1_recon_in_thread3_flag) && (ps_dec->u4_bs_deblk_thread_created == 0)) { ps_dec->u4_start_recon_deblk = 0; ithread_create(ps_dec->pv_bs_deblk_thread_handle, NULL, (void *)ih264d_recon_deblk_thread, (void *)ps_dec); ps_dec->u4_bs_deblk_thread_created = 1; } } } /* INITIALIZATION of fn ptrs for MC and formMbPartInfo functions */ { UWORD8 uc_nofield_nombaff; uc_nofield_nombaff = ((ps_dec->ps_cur_slice->u1_field_pic_flag == 0) && (ps_dec->ps_cur_slice->u1_mbaff_frame_flag == 0) && (u1_slice_type != B_SLICE) && (ps_dec->ps_cur_pps->u1_wted_pred_flag == 0)); /* Initialise MC and formMbPartInfo fn ptrs one time based on profile_idc */ if(uc_nofield_nombaff) { ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_bp; ps_dec->p_motion_compensate = ih264d_motion_compensate_bp; } else { ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_mp; 
ps_dec->p_motion_compensate = ih264d_motion_compensate_mp; } } /* * Decide whether to decode the current picture or not */ { dec_err_status_t * ps_err = ps_dec->ps_dec_err_status; if(ps_err->u4_frm_sei_sync == u2_frame_num) { ps_err->u1_err_flag = ACCEPT_ALL_PICS; ps_err->u4_frm_sei_sync = SYNC_FRM_DEFAULT; } ps_err->u4_cur_frm = u2_frame_num; } /* Decision for decoding if the picture is to be skipped */ { WORD32 i4_skip_b_pic, i4_skip_p_pic; i4_skip_b_pic = (ps_dec->u4_skip_frm_mask & B_SLC_BIT) && (B_SLICE == u1_slice_type) && (0 == u1_nal_ref_idc); i4_skip_p_pic = (ps_dec->u4_skip_frm_mask & P_SLC_BIT) && (P_SLICE == u1_slice_type) && (0 == u1_nal_ref_idc); /**************************************************************/ /* Skip the B picture if skip mask is set for B picture and */ /* Current B picture is a non reference B picture or there is */ /* no user for reference B picture */ /**************************************************************/ if(i4_skip_b_pic) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= B_SLC_BIT; /* Don't decode the picture in SKIP-B mode if that picture is B */ /* and also it is not to be used as a reference picture */ ps_dec->u1_last_pic_not_decoded = 1; return OK; } /**************************************************************/ /* Skip the P picture if skip mask is set for P picture and */ /* Current P picture is a non reference P picture or there is */ /* no user for reference P picture */ /**************************************************************/ if(i4_skip_p_pic) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= P_SLC_BIT; /* Don't decode the picture in SKIP-P mode if that picture is P */ /* and also it is not to be used as a reference picture */ ps_dec->u1_last_pic_not_decoded = 1; return OK; } } { UWORD16 u2_mb_x, u2_mb_y; ps_dec->i4_submb_ofst = ((u2_first_mb_in_slice << ps_cur_slice->u1_mbaff_frame_flag) * SUB_BLK_SIZE) - SUB_BLK_SIZE; if(u2_first_mb_in_slice) { UWORD8 u1_mb_aff; UWORD8 u1_field_pic; UWORD16 u2_frm_wd_in_mbs; u2_frm_wd_in_mbs = ps_seq->u2_frm_wd_in_mbs; u1_mb_aff = ps_cur_slice->u1_mbaff_frame_flag; u1_field_pic = ps_cur_slice->u1_field_pic_flag; { UWORD32 x_offset; UWORD32 y_offset; UWORD32 u4_frame_stride; tfr_ctxt_t *ps_trns_addr; // = &ps_dec->s_tran_addrecon_parse; if(ps_dec->u1_separate_parse) { ps_trns_addr = &ps_dec->s_tran_addrecon_parse; } else { ps_trns_addr = &ps_dec->s_tran_addrecon; } u2_mb_x = MOD(u2_first_mb_in_slice, u2_frm_wd_in_mbs); u2_mb_y = DIV(u2_first_mb_in_slice, u2_frm_wd_in_mbs); u2_mb_y <<= u1_mb_aff; if((u2_mb_x > u2_frm_wd_in_mbs - 1) || (u2_mb_y > ps_dec->u2_frm_ht_in_mbs - 1)) { return ERROR_CORRUPTED_SLICE; } u4_frame_stride = ps_dec->u2_frm_wd_y << u1_field_pic; x_offset = u2_mb_x << 4; y_offset = (u2_mb_y * u4_frame_stride) << 4; ps_trns_addr->pu1_dest_y = ps_dec->s_cur_pic.pu1_buf1 + x_offset + y_offset; u4_frame_stride = ps_dec->u2_frm_wd_uv << u1_field_pic; x_offset >>= 1; y_offset = (u2_mb_y * u4_frame_stride) << 3; x_offset *= YUV420SP_FACTOR; ps_trns_addr->pu1_dest_u = ps_dec->s_cur_pic.pu1_buf2 + x_offset + y_offset; ps_trns_addr->pu1_dest_v = ps_dec->s_cur_pic.pu1_buf3 + x_offset + y_offset; ps_trns_addr->pu1_mb_y = ps_trns_addr->pu1_dest_y; ps_trns_addr->pu1_mb_u = ps_trns_addr->pu1_dest_u; ps_trns_addr->pu1_mb_v = ps_trns_addr->pu1_dest_v; if(ps_dec->u1_separate_parse == 1) { ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic + (u2_first_mb_in_slice << u1_mb_aff); } else { ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic + (u2_first_mb_in_slice << u1_mb_aff); } ps_dec->u2_cur_mb_addr = (u2_first_mb_in_slice 
<< u1_mb_aff); ps_dec->ps_mv_cur = ps_dec->s_cur_pic.ps_mv + ((u2_first_mb_in_slice << u1_mb_aff) << 4); } } else { tfr_ctxt_t *ps_trns_addr; if(ps_dec->u1_separate_parse) { ps_trns_addr = &ps_dec->s_tran_addrecon_parse; } else { ps_trns_addr = &ps_dec->s_tran_addrecon; } u2_mb_x = 0xffff; u2_mb_y = 0; ps_dec->u2_cur_mb_addr = 0; ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic; ps_dec->ps_mv_cur = ps_dec->s_cur_pic.ps_mv; ps_trns_addr->pu1_dest_y = ps_dec->s_cur_pic.pu1_buf1; ps_trns_addr->pu1_dest_u = ps_dec->s_cur_pic.pu1_buf2; ps_trns_addr->pu1_dest_v = ps_dec->s_cur_pic.pu1_buf3; ps_trns_addr->pu1_mb_y = ps_trns_addr->pu1_dest_y; ps_trns_addr->pu1_mb_u = ps_trns_addr->pu1_dest_u; ps_trns_addr->pu1_mb_v = ps_trns_addr->pu1_dest_v; } ps_dec->ps_part = ps_dec->ps_parse_part_params; ps_dec->u2_mbx = (MOD(u2_first_mb_in_slice - 1, ps_seq->u2_frm_wd_in_mbs)); ps_dec->u2_mby = (DIV(u2_first_mb_in_slice - 1, ps_seq->u2_frm_wd_in_mbs)); ps_dec->u2_mby <<= ps_cur_slice->u1_mbaff_frame_flag; ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; } /* RBSP stop bit is used for CABAC decoding*/ ps_bitstrm->u4_max_ofst += ps_dec->ps_cur_pps->u1_entropy_coding_mode; ps_dec->u1_B = (u1_slice_type == B_SLICE); ps_dec->u4_next_mb_skip = 0; ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice = ps_dec->ps_cur_slice->u2_first_mb_in_slice; ps_dec->ps_parse_cur_slice->slice_type = ps_dec->ps_cur_slice->u1_slice_type; ps_dec->u4_start_recon_deblk = 1; { WORD32 num_entries; WORD32 size; UWORD8 *pu1_buf; num_entries = MIN(MAX_FRAMES, ps_dec->u4_num_ref_frames_at_init); num_entries = 2 * ((2 * num_entries) + 1); size = num_entries * sizeof(void *); size += PAD_MAP_IDX_POC * sizeof(void *); pu1_buf = (UWORD8 *)ps_dec->pv_map_ref_idx_to_poc_buf; pu1_buf += size * ps_dec->u2_cur_slice_num; ps_dec->ps_parse_cur_slice->ppv_map_ref_idx_to_poc = ( void *)pu1_buf; } if(ps_dec->u1_separate_parse) { ps_dec->ps_parse_cur_slice->pv_tu_coeff_data_start = ps_dec->pv_parse_tu_coeff_data; } else { ps_dec->pv_proc_tu_coeff_data = ps_dec->pv_parse_tu_coeff_data; } if(u1_slice_type == I_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= I_SLC_BIT; ret = ih264d_parse_islice(ps_dec, u2_first_mb_in_slice); if(ps_dec->i4_pic_type != B_SLICE && ps_dec->i4_pic_type != P_SLICE) ps_dec->i4_pic_type = I_SLICE; } else if(u1_slice_type == P_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= P_SLC_BIT; ret = ih264d_parse_pslice(ps_dec, u2_first_mb_in_slice); ps_dec->u1_pr_sl_type = u1_slice_type; if(ps_dec->i4_pic_type != B_SLICE) ps_dec->i4_pic_type = P_SLICE; } else if(u1_slice_type == B_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= B_SLC_BIT; ret = ih264d_parse_bslice(ps_dec, u2_first_mb_in_slice); ps_dec->u1_pr_sl_type = u1_slice_type; ps_dec->i4_pic_type = B_SLICE; } else return ERROR_INV_SLC_TYPE_T; if(ps_dec->u1_slice_header_done) { /* set to zero to indicate a valid slice has been decoded */ /* first slice header successfully decoded */ ps_dec->u4_first_slice_in_pic = 0; ps_dec->u1_first_slice_in_stream = 0; } if(ret != OK) return ret; /* storing last Mb X and MbY of the slice */ ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; /* End of Picture detection */ if(ps_dec->u2_total_mbs_coded >= (ps_seq->u2_max_mb_addr + 1)) { ps_dec->u1_pic_decode_done = 1; } { dec_err_status_t * ps_err = ps_dec->ps_dec_err_status; if((ps_err->u1_err_flag & REJECT_PB_PICS) && (ps_err->u1_cur_pic_type == PIC_TYPE_I)) { ps_err->u1_err_flag = ACCEPT_ALL_PICS; } } PRINT_BIN_BIT_RATIO(ps_dec) return 
ret; } Commit Message: Decoder: Fixed initialization of first_slice_in_pic To handle some errors, first_slice_in_pic was being set to 2. This is now cleaned up and first_slice_in_pic is set to 1 only once per pic. This will ensure picture level initializations are done only once even in case of error clips Bug: 33717589 Bug: 33551775 Bug: 33716442 Bug: 33677995 Change-Id: If341436b3cbaa724017eedddd88c2e6fac36d8ba CWE ID: CWE-200
WORD32 ih264d_parse_decode_slice(UWORD8 u1_is_idr_slice, UWORD8 u1_nal_ref_idc, dec_struct_t *ps_dec /* Decoder parameters */ ) { dec_bit_stream_t * ps_bitstrm = ps_dec->ps_bitstrm; dec_pic_params_t *ps_pps; dec_seq_params_t *ps_seq; dec_slice_params_t *ps_cur_slice = ps_dec->ps_cur_slice; pocstruct_t s_tmp_poc; WORD32 i_delta_poc[2]; WORD32 i4_poc = 0; UWORD16 u2_first_mb_in_slice, u2_frame_num; UWORD8 u1_field_pic_flag, u1_redundant_pic_cnt = 0, u1_slice_type; UWORD32 u4_idr_pic_id = 0; UWORD8 u1_bottom_field_flag, u1_pic_order_cnt_type; UWORD8 u1_nal_unit_type; UWORD32 *pu4_bitstrm_buf = ps_bitstrm->pu4_buffer; UWORD32 *pu4_bitstrm_ofst = &ps_bitstrm->u4_ofst; WORD8 i1_is_end_of_poc; WORD32 ret, end_of_frame; WORD32 prev_slice_err, num_mb_skipped; UWORD8 u1_mbaff; pocstruct_t *ps_cur_poc; UWORD32 u4_temp; WORD32 i_temp; UWORD32 u4_call_end_of_pic = 0; /* read FirstMbInSlice and slice type*/ ps_dec->ps_dpb_cmds->u1_dpb_commands_read_slc = 0; u2_first_mb_in_slice = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u2_first_mb_in_slice > (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)) { return ERROR_CORRUPTED_SLICE; } /*we currently don not support ASO*/ if(((u2_first_mb_in_slice << ps_cur_slice->u1_mbaff_frame_flag) <= ps_dec->u2_cur_mb_addr) && (ps_dec->u4_first_slice_in_pic == 0)) { return ERROR_CORRUPTED_SLICE; } COPYTHECONTEXT("SH: first_mb_in_slice",u2_first_mb_in_slice); u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp > 9) return ERROR_INV_SLC_TYPE_T; u1_slice_type = u4_temp; COPYTHECONTEXT("SH: slice_type",(u1_slice_type)); ps_dec->u1_sl_typ_5_9 = 0; /* Find Out the Slice Type is 5 to 9 or not then Set the Flag */ /* u1_sl_typ_5_9 = 1 .Which tells that all the slices in the Pic*/ /* will be of same type of current */ if(u1_slice_type > 4) { u1_slice_type -= 5; ps_dec->u1_sl_typ_5_9 = 1; } { UWORD32 skip; if((ps_dec->i4_app_skip_mode == IVD_SKIP_PB) || (ps_dec->i4_dec_skip_mode == IVD_SKIP_PB)) { UWORD32 u4_bit_stream_offset = 0; if(ps_dec->u1_nal_unit_type == IDR_SLICE_NAL) { skip = 0; ps_dec->i4_dec_skip_mode = IVD_SKIP_NONE; } else if((I_SLICE == u1_slice_type) && (1 >= ps_dec->ps_cur_sps->u1_num_ref_frames)) { skip = 0; ps_dec->i4_dec_skip_mode = IVD_SKIP_NONE; } else { skip = 1; } /* If one frame worth of data is already skipped, do not skip the next one */ if((0 == u2_first_mb_in_slice) && (1 == ps_dec->u4_prev_nal_skipped)) { skip = 0; } if(skip) { ps_dec->u4_prev_nal_skipped = 1; ps_dec->i4_dec_skip_mode = IVD_SKIP_PB; return 0; } else { /* If the previous NAL was skipped, then do not process that buffer in this call. Return to app and process it in the next call. This is necessary to handle cases where I/IDR is not complete in the current buffer and application intends to fill the remaining part of the bitstream later. 
This ensures we process only frame worth of data in every call */ if(1 == ps_dec->u4_prev_nal_skipped) { ps_dec->u4_return_to_app = 1; return 0; } } } } u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp & MASK_ERR_PIC_SET_ID) return ERROR_INV_SLICE_HDR_T; /* discard slice if pic param is invalid */ COPYTHECONTEXT("SH: pic_parameter_set_id", u4_temp); ps_pps = &ps_dec->ps_pps[u4_temp]; if(FALSE == ps_pps->u1_is_valid) { return ERROR_INV_SLICE_HDR_T; } ps_seq = ps_pps->ps_sps; if(!ps_seq) return ERROR_INV_SLICE_HDR_T; if(FALSE == ps_seq->u1_is_valid) return ERROR_INV_SLICE_HDR_T; /* Get the frame num */ u2_frame_num = ih264d_get_bits_h264(ps_bitstrm, ps_seq->u1_bits_in_frm_num); COPYTHECONTEXT("SH: frame_num", u2_frame_num); if(!ps_dec->u1_first_slice_in_stream && ps_dec->u4_first_slice_in_pic) { pocstruct_t *ps_prev_poc = &ps_dec->s_prev_pic_poc; pocstruct_t *ps_cur_poc = &ps_dec->s_cur_pic_poc; ps_dec->u2_mbx = 0xffff; ps_dec->u2_mby = 0; if((0 == u1_is_idr_slice) && ps_cur_slice->u1_nal_ref_idc) ps_dec->u2_prev_ref_frame_num = ps_cur_slice->u2_frame_num; if(u1_is_idr_slice || ps_cur_slice->u1_mmco_equalto5) ps_dec->u2_prev_ref_frame_num = 0; if(ps_dec->ps_cur_sps->u1_gaps_in_frame_num_value_allowed_flag) { ih264d_decode_gaps_in_frame_num(ps_dec, u2_frame_num); } ps_prev_poc->i4_prev_frame_num_ofst = ps_cur_poc->i4_prev_frame_num_ofst; ps_prev_poc->u2_frame_num = ps_cur_poc->u2_frame_num; ps_prev_poc->u1_mmco_equalto5 = ps_cur_slice->u1_mmco_equalto5; if(ps_cur_slice->u1_nal_ref_idc) { ps_prev_poc->i4_pic_order_cnt_lsb = ps_cur_poc->i4_pic_order_cnt_lsb; ps_prev_poc->i4_pic_order_cnt_msb = ps_cur_poc->i4_pic_order_cnt_msb; ps_prev_poc->i4_delta_pic_order_cnt_bottom = ps_cur_poc->i4_delta_pic_order_cnt_bottom; ps_prev_poc->i4_delta_pic_order_cnt[0] = ps_cur_poc->i4_delta_pic_order_cnt[0]; ps_prev_poc->i4_delta_pic_order_cnt[1] = ps_cur_poc->i4_delta_pic_order_cnt[1]; ps_prev_poc->u1_bot_field = ps_cur_poc->u1_bot_field; } ps_dec->u2_total_mbs_coded = 0; } /* Get the field related flags */ if(!ps_seq->u1_frame_mbs_only_flag) { u1_field_pic_flag = ih264d_get_bit_h264(ps_bitstrm); COPYTHECONTEXT("SH: field_pic_flag", u1_field_pic_flag); u1_bottom_field_flag = 0; if(u1_field_pic_flag) { ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan_fld; u1_bottom_field_flag = ih264d_get_bit_h264(ps_bitstrm); COPYTHECONTEXT("SH: bottom_field_flag", u1_bottom_field_flag); } else { ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan; } } else { u1_field_pic_flag = 0; u1_bottom_field_flag = 0; ps_dec->pu1_inv_scan = (UWORD8 *)gau1_ih264d_inv_scan; } u1_nal_unit_type = SLICE_NAL; if(u1_is_idr_slice) { if(0 == u1_field_pic_flag) { ps_dec->u1_top_bottom_decoded = TOP_FIELD_ONLY | BOT_FIELD_ONLY; } u1_nal_unit_type = IDR_SLICE_NAL; u4_idr_pic_id = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_idr_pic_id > 65535) return ERROR_INV_SLICE_HDR_T; COPYTHECONTEXT("SH: ", u4_idr_pic_id); } /* read delta pic order count information*/ i_delta_poc[0] = i_delta_poc[1] = 0; s_tmp_poc.i4_pic_order_cnt_lsb = 0; s_tmp_poc.i4_delta_pic_order_cnt_bottom = 0; u1_pic_order_cnt_type = ps_seq->u1_pic_order_cnt_type; if(u1_pic_order_cnt_type == 0) { i_temp = ih264d_get_bits_h264( ps_bitstrm, ps_seq->u1_log2_max_pic_order_cnt_lsb_minus); if(i_temp < 0 || i_temp >= ps_seq->i4_max_pic_order_cntLsb) return ERROR_INV_SLICE_HDR_T; s_tmp_poc.i4_pic_order_cnt_lsb = i_temp; COPYTHECONTEXT("SH: pic_order_cnt_lsb", s_tmp_poc.i4_pic_order_cnt_lsb); if((ps_pps->u1_pic_order_present_flag == 1) && (!u1_field_pic_flag)) { 
s_tmp_poc.i4_delta_pic_order_cnt_bottom = ih264d_sev( pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt_bottom", s_tmp_poc.i4_delta_pic_order_cnt_bottom); } } s_tmp_poc.i4_delta_pic_order_cnt[0] = 0; s_tmp_poc.i4_delta_pic_order_cnt[1] = 0; if(u1_pic_order_cnt_type == 1 && (!ps_seq->u1_delta_pic_order_always_zero_flag)) { s_tmp_poc.i4_delta_pic_order_cnt[0] = ih264d_sev(pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt[0]", s_tmp_poc.i4_delta_pic_order_cnt[0]); if(ps_pps->u1_pic_order_present_flag && !u1_field_pic_flag) { s_tmp_poc.i4_delta_pic_order_cnt[1] = ih264d_sev( pu4_bitstrm_ofst, pu4_bitstrm_buf); COPYTHECONTEXT("SH: delta_pic_order_cnt[1]", s_tmp_poc.i4_delta_pic_order_cnt[1]); } } if(ps_pps->u1_redundant_pic_cnt_present_flag) { u4_temp = ih264d_uev(pu4_bitstrm_ofst, pu4_bitstrm_buf); if(u4_temp > MAX_REDUNDANT_PIC_CNT) return ERROR_INV_SLICE_HDR_T; u1_redundant_pic_cnt = u4_temp; COPYTHECONTEXT("SH: redundant_pic_cnt", u1_redundant_pic_cnt); } /*--------------------------------------------------------------------*/ /* Check if the slice is part of new picture */ /*--------------------------------------------------------------------*/ /* First slice of a picture is always considered as part of new picture */ i1_is_end_of_poc = 1; ps_dec->ps_dec_err_status->u1_err_flag &= MASK_REJECT_CUR_PIC; if(ps_dec->u4_first_slice_in_pic == 0) { i1_is_end_of_poc = ih264d_is_end_of_pic(u2_frame_num, u1_nal_ref_idc, &s_tmp_poc, &ps_dec->s_cur_pic_poc, ps_cur_slice, u1_pic_order_cnt_type, u1_nal_unit_type, u4_idr_pic_id, u1_field_pic_flag, u1_bottom_field_flag); if(i1_is_end_of_poc) { ps_dec->u1_first_slice_in_stream = 0; return ERROR_INCOMPLETE_FRAME; } } /*--------------------------------------------------------------------*/ /* Check for error in slice and parse the missing/corrupted MB's */ /* as skip-MB's in an inserted P-slice */ /*--------------------------------------------------------------------*/ u1_mbaff = ps_seq->u1_mb_aff_flag && (!u1_field_pic_flag); prev_slice_err = 0; if(i1_is_end_of_poc || ps_dec->u1_first_slice_in_stream) { if(u2_frame_num != ps_dec->u2_prv_frame_num && ps_dec->u1_top_bottom_decoded != 0 && ps_dec->u1_top_bottom_decoded != (TOP_FIELD_ONLY | BOT_FIELD_ONLY)) { ps_dec->u1_dangling_field = 1; if(ps_dec->u4_first_slice_in_pic) { prev_slice_err = 1; } else { prev_slice_err = 2; } if(ps_dec->u1_top_bottom_decoded ==TOP_FIELD_ONLY) ps_cur_slice->u1_bottom_field_flag = 1; else ps_cur_slice->u1_bottom_field_flag = 0; num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) - ps_dec->u2_total_mbs_coded; ps_cur_poc = &ps_dec->s_cur_pic_poc; u1_is_idr_slice = ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL; } else if(ps_dec->u4_first_slice_in_pic) { if(u2_first_mb_in_slice > 0) { prev_slice_err = 1; num_mb_skipped = u2_first_mb_in_slice << u1_mbaff; ps_cur_poc = &s_tmp_poc; ps_cur_slice->u4_idr_pic_id = u4_idr_pic_id; ps_cur_slice->u1_field_pic_flag = u1_field_pic_flag; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_cur_slice->i4_pic_order_cnt_lsb = s_tmp_poc.i4_pic_order_cnt_lsb; ps_cur_slice->u1_nal_unit_type = u1_nal_unit_type; ps_cur_slice->u1_redundant_pic_cnt = u1_redundant_pic_cnt; ps_cur_slice->u1_nal_ref_idc = u1_nal_ref_idc; ps_cur_slice->u1_pic_order_cnt_type = u1_pic_order_cnt_type; ps_cur_slice->u1_mbaff_frame_flag = ps_seq->u1_mb_aff_flag && (!u1_field_pic_flag); } } else { /* since i1_is_end_of_poc is set ,means new frame num is encountered. 
so conceal the current frame * completely */ prev_slice_err = 2; num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) - ps_dec->u2_total_mbs_coded; ps_cur_poc = &s_tmp_poc; } } else { if((u2_first_mb_in_slice << u1_mbaff) > ps_dec->u2_total_mbs_coded) { prev_slice_err = 2; num_mb_skipped = (u2_first_mb_in_slice << u1_mbaff) - ps_dec->u2_total_mbs_coded; ps_cur_poc = &s_tmp_poc; } else if((u2_first_mb_in_slice << u1_mbaff) < ps_dec->u2_total_mbs_coded) { return ERROR_CORRUPTED_SLICE; } } if(prev_slice_err) { ret = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, u1_is_idr_slice, u2_frame_num, ps_cur_poc, prev_slice_err); if(ps_dec->u1_dangling_field == 1) { ps_dec->u1_second_field = 1 - ps_dec->u1_second_field; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_dec->u2_prv_frame_num = u2_frame_num; ps_dec->u1_first_slice_in_stream = 0; return ERROR_DANGLING_FIELD_IN_PIC; } if(prev_slice_err == 2) { ps_dec->u1_first_slice_in_stream = 0; return ERROR_INCOMPLETE_FRAME; } if(ps_dec->u2_total_mbs_coded >= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) { /* return if all MBs in frame are parsed*/ ps_dec->u1_first_slice_in_stream = 0; return ERROR_IN_LAST_SLICE_OF_PIC; } if(ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) { ih264d_err_pic_dispbuf_mgr(ps_dec); return ERROR_NEW_FRAME_EXPECTED; } if(ret != OK) return ret; i1_is_end_of_poc = 0; } if (ps_dec->u4_first_slice_in_pic == 0) { ps_dec->ps_parse_cur_slice++; ps_dec->u2_cur_slice_num++; } if((ps_dec->u1_separate_parse == 0) && (ps_dec->u4_first_slice_in_pic == 0)) { ps_dec->ps_decode_cur_slice++; } ps_dec->u1_slice_header_done = 0; if(u1_field_pic_flag) { ps_dec->u2_prv_frame_num = u2_frame_num; } if(ps_cur_slice->u1_mmco_equalto5) { WORD32 i4_temp_poc; WORD32 i4_top_field_order_poc, i4_bot_field_order_poc; if(!ps_cur_slice->u1_field_pic_flag) // or a complementary field pair { i4_top_field_order_poc = ps_dec->ps_cur_pic->i4_top_field_order_cnt; i4_bot_field_order_poc = ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; i4_temp_poc = MIN(i4_top_field_order_poc, i4_bot_field_order_poc); } else if(!ps_cur_slice->u1_bottom_field_flag) i4_temp_poc = ps_dec->ps_cur_pic->i4_top_field_order_cnt; else i4_temp_poc = ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; ps_dec->ps_cur_pic->i4_top_field_order_cnt = i4_temp_poc - ps_dec->ps_cur_pic->i4_top_field_order_cnt; ps_dec->ps_cur_pic->i4_bottom_field_order_cnt = i4_temp_poc - ps_dec->ps_cur_pic->i4_bottom_field_order_cnt; ps_dec->ps_cur_pic->i4_poc = i4_temp_poc; ps_dec->ps_cur_pic->i4_avg_poc = i4_temp_poc; } if(ps_dec->u4_first_slice_in_pic) { ret = ih264d_decode_pic_order_cnt(u1_is_idr_slice, u2_frame_num, &ps_dec->s_prev_pic_poc, &s_tmp_poc, ps_cur_slice, ps_pps, u1_nal_ref_idc, u1_bottom_field_flag, u1_field_pic_flag, &i4_poc); if(ret != OK) return ret; /* Display seq no calculations */ if(i4_poc >= ps_dec->i4_max_poc) ps_dec->i4_max_poc = i4_poc; /* IDR Picture or POC wrap around */ if(i4_poc == 0) { ps_dec->i4_prev_max_display_seq = ps_dec->i4_prev_max_display_seq + ps_dec->i4_max_poc + ps_dec->u1_max_dec_frame_buffering + 1; ps_dec->i4_max_poc = 0; } } /*--------------------------------------------------------------------*/ /* Copy the values read from the bitstream to the slice header and then*/ /* If the slice is first slice in picture, then do Start of Picture */ /* processing. 
*/ /*--------------------------------------------------------------------*/ ps_cur_slice->i4_delta_pic_order_cnt[0] = i_delta_poc[0]; ps_cur_slice->i4_delta_pic_order_cnt[1] = i_delta_poc[1]; ps_cur_slice->u4_idr_pic_id = u4_idr_pic_id; ps_cur_slice->u2_first_mb_in_slice = u2_first_mb_in_slice; ps_cur_slice->u1_field_pic_flag = u1_field_pic_flag; ps_cur_slice->u1_bottom_field_flag = u1_bottom_field_flag; ps_cur_slice->u1_slice_type = u1_slice_type; ps_cur_slice->i4_pic_order_cnt_lsb = s_tmp_poc.i4_pic_order_cnt_lsb; ps_cur_slice->u1_nal_unit_type = u1_nal_unit_type; ps_cur_slice->u1_redundant_pic_cnt = u1_redundant_pic_cnt; ps_cur_slice->u1_nal_ref_idc = u1_nal_ref_idc; ps_cur_slice->u1_pic_order_cnt_type = u1_pic_order_cnt_type; if(ps_seq->u1_frame_mbs_only_flag) ps_cur_slice->u1_direct_8x8_inference_flag = ps_seq->u1_direct_8x8_inference_flag; else ps_cur_slice->u1_direct_8x8_inference_flag = 1; if(u1_slice_type == B_SLICE) { ps_cur_slice->u1_direct_spatial_mv_pred_flag = ih264d_get_bit_h264( ps_bitstrm); COPYTHECONTEXT("SH: direct_spatial_mv_pred_flag", ps_cur_slice->u1_direct_spatial_mv_pred_flag); if(ps_cur_slice->u1_direct_spatial_mv_pred_flag) ps_cur_slice->pf_decodeDirect = ih264d_decode_spatial_direct; else ps_cur_slice->pf_decodeDirect = ih264d_decode_temporal_direct; if(!((ps_pps->ps_sps->u1_mb_aff_flag) && (!u1_field_pic_flag))) ps_dec->pf_mvpred = ih264d_mvpred_nonmbaffB; } else { if(!((ps_pps->ps_sps->u1_mb_aff_flag) && (!u1_field_pic_flag))) ps_dec->pf_mvpred = ih264d_mvpred_nonmbaff; } if(ps_dec->u4_first_slice_in_pic) { if(u2_first_mb_in_slice == 0) { ret = ih264d_start_of_pic(ps_dec, i4_poc, &s_tmp_poc, u2_frame_num, ps_pps); if(ret != OK) return ret; } ps_dec->u4_output_present = 0; { ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer, &(ps_dec->s_disp_op)); /* If error code is non-zero then there is no buffer available for display, hence avoid format conversion */ if(0 != ps_dec->s_disp_op.u4_error_code) { ps_dec->u4_fmt_conv_cur_row = ps_dec->s_disp_frame_info.u4_y_ht; } else ps_dec->u4_output_present = 1; } if(ps_dec->u1_separate_parse == 1) { if(ps_dec->u4_dec_thread_created == 0) { ithread_create(ps_dec->pv_dec_thread_handle, NULL, (void *)ih264d_decode_picture_thread, (void *)ps_dec); ps_dec->u4_dec_thread_created = 1; } if((ps_dec->u4_num_cores == 3) && ((ps_dec->u4_app_disable_deblk_frm == 0) || ps_dec->i1_recon_in_thread3_flag) && (ps_dec->u4_bs_deblk_thread_created == 0)) { ps_dec->u4_start_recon_deblk = 0; ithread_create(ps_dec->pv_bs_deblk_thread_handle, NULL, (void *)ih264d_recon_deblk_thread, (void *)ps_dec); ps_dec->u4_bs_deblk_thread_created = 1; } } } /* INITIALIZATION of fn ptrs for MC and formMbPartInfo functions */ { UWORD8 uc_nofield_nombaff; uc_nofield_nombaff = ((ps_dec->ps_cur_slice->u1_field_pic_flag == 0) && (ps_dec->ps_cur_slice->u1_mbaff_frame_flag == 0) && (u1_slice_type != B_SLICE) && (ps_dec->ps_cur_pps->u1_wted_pred_flag == 0)); /* Initialise MC and formMbPartInfo fn ptrs one time based on profile_idc */ if(uc_nofield_nombaff) { ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_bp; ps_dec->p_motion_compensate = ih264d_motion_compensate_bp; } else { ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_mp; ps_dec->p_motion_compensate = ih264d_motion_compensate_mp; } } /* * Decide whether to decode the current picture or not */ { dec_err_status_t * ps_err = ps_dec->ps_dec_err_status; if(ps_err->u4_frm_sei_sync == u2_frame_num) { ps_err->u1_err_flag = ACCEPT_ALL_PICS; ps_err->u4_frm_sei_sync = SYNC_FRM_DEFAULT; } 
ps_err->u4_cur_frm = u2_frame_num; } /* Decision for decoding if the picture is to be skipped */ { WORD32 i4_skip_b_pic, i4_skip_p_pic; i4_skip_b_pic = (ps_dec->u4_skip_frm_mask & B_SLC_BIT) && (B_SLICE == u1_slice_type) && (0 == u1_nal_ref_idc); i4_skip_p_pic = (ps_dec->u4_skip_frm_mask & P_SLC_BIT) && (P_SLICE == u1_slice_type) && (0 == u1_nal_ref_idc); /**************************************************************/ /* Skip the B picture if skip mask is set for B picture and */ /* Current B picture is a non reference B picture or there is */ /* no user for reference B picture */ /**************************************************************/ if(i4_skip_b_pic) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= B_SLC_BIT; /* Don't decode the picture in SKIP-B mode if that picture is B */ /* and also it is not to be used as a reference picture */ ps_dec->u1_last_pic_not_decoded = 1; return OK; } /**************************************************************/ /* Skip the P picture if skip mask is set for P picture and */ /* Current P picture is a non reference P picture or there is */ /* no user for reference P picture */ /**************************************************************/ if(i4_skip_p_pic) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= P_SLC_BIT; /* Don't decode the picture in SKIP-P mode if that picture is P */ /* and also it is not to be used as a reference picture */ ps_dec->u1_last_pic_not_decoded = 1; return OK; } } { UWORD16 u2_mb_x, u2_mb_y; ps_dec->i4_submb_ofst = ((u2_first_mb_in_slice << ps_cur_slice->u1_mbaff_frame_flag) * SUB_BLK_SIZE) - SUB_BLK_SIZE; if(u2_first_mb_in_slice) { UWORD8 u1_mb_aff; UWORD8 u1_field_pic; UWORD16 u2_frm_wd_in_mbs; u2_frm_wd_in_mbs = ps_seq->u2_frm_wd_in_mbs; u1_mb_aff = ps_cur_slice->u1_mbaff_frame_flag; u1_field_pic = ps_cur_slice->u1_field_pic_flag; { UWORD32 x_offset; UWORD32 y_offset; UWORD32 u4_frame_stride; tfr_ctxt_t *ps_trns_addr; // = &ps_dec->s_tran_addrecon_parse; if(ps_dec->u1_separate_parse) { ps_trns_addr = &ps_dec->s_tran_addrecon_parse; } else { ps_trns_addr = &ps_dec->s_tran_addrecon; } u2_mb_x = MOD(u2_first_mb_in_slice, u2_frm_wd_in_mbs); u2_mb_y = DIV(u2_first_mb_in_slice, u2_frm_wd_in_mbs); u2_mb_y <<= u1_mb_aff; if((u2_mb_x > u2_frm_wd_in_mbs - 1) || (u2_mb_y > ps_dec->u2_frm_ht_in_mbs - 1)) { return ERROR_CORRUPTED_SLICE; } u4_frame_stride = ps_dec->u2_frm_wd_y << u1_field_pic; x_offset = u2_mb_x << 4; y_offset = (u2_mb_y * u4_frame_stride) << 4; ps_trns_addr->pu1_dest_y = ps_dec->s_cur_pic.pu1_buf1 + x_offset + y_offset; u4_frame_stride = ps_dec->u2_frm_wd_uv << u1_field_pic; x_offset >>= 1; y_offset = (u2_mb_y * u4_frame_stride) << 3; x_offset *= YUV420SP_FACTOR; ps_trns_addr->pu1_dest_u = ps_dec->s_cur_pic.pu1_buf2 + x_offset + y_offset; ps_trns_addr->pu1_dest_v = ps_dec->s_cur_pic.pu1_buf3 + x_offset + y_offset; ps_trns_addr->pu1_mb_y = ps_trns_addr->pu1_dest_y; ps_trns_addr->pu1_mb_u = ps_trns_addr->pu1_dest_u; ps_trns_addr->pu1_mb_v = ps_trns_addr->pu1_dest_v; if(ps_dec->u1_separate_parse == 1) { ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic + (u2_first_mb_in_slice << u1_mb_aff); } else { ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic + (u2_first_mb_in_slice << u1_mb_aff); } ps_dec->u2_cur_mb_addr = (u2_first_mb_in_slice << u1_mb_aff); ps_dec->ps_mv_cur = ps_dec->s_cur_pic.ps_mv + ((u2_first_mb_in_slice << u1_mb_aff) << 4); } } else { tfr_ctxt_t *ps_trns_addr; if(ps_dec->u1_separate_parse) { ps_trns_addr = &ps_dec->s_tran_addrecon_parse; } else { ps_trns_addr = &ps_dec->s_tran_addrecon; } u2_mb_x = 0xffff; u2_mb_y = 0; 
ps_dec->u2_cur_mb_addr = 0; ps_dec->ps_deblk_mbn = ps_dec->ps_deblk_pic; ps_dec->ps_mv_cur = ps_dec->s_cur_pic.ps_mv; ps_trns_addr->pu1_dest_y = ps_dec->s_cur_pic.pu1_buf1; ps_trns_addr->pu1_dest_u = ps_dec->s_cur_pic.pu1_buf2; ps_trns_addr->pu1_dest_v = ps_dec->s_cur_pic.pu1_buf3; ps_trns_addr->pu1_mb_y = ps_trns_addr->pu1_dest_y; ps_trns_addr->pu1_mb_u = ps_trns_addr->pu1_dest_u; ps_trns_addr->pu1_mb_v = ps_trns_addr->pu1_dest_v; } ps_dec->ps_part = ps_dec->ps_parse_part_params; ps_dec->u2_mbx = (MOD(u2_first_mb_in_slice - 1, ps_seq->u2_frm_wd_in_mbs)); ps_dec->u2_mby = (DIV(u2_first_mb_in_slice - 1, ps_seq->u2_frm_wd_in_mbs)); ps_dec->u2_mby <<= ps_cur_slice->u1_mbaff_frame_flag; ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; } /* RBSP stop bit is used for CABAC decoding*/ ps_bitstrm->u4_max_ofst += ps_dec->ps_cur_pps->u1_entropy_coding_mode; ps_dec->u1_B = (u1_slice_type == B_SLICE); ps_dec->u4_next_mb_skip = 0; ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice = ps_dec->ps_cur_slice->u2_first_mb_in_slice; ps_dec->ps_parse_cur_slice->slice_type = ps_dec->ps_cur_slice->u1_slice_type; ps_dec->u4_start_recon_deblk = 1; { WORD32 num_entries; WORD32 size; UWORD8 *pu1_buf; num_entries = MIN(MAX_FRAMES, ps_dec->u4_num_ref_frames_at_init); num_entries = 2 * ((2 * num_entries) + 1); size = num_entries * sizeof(void *); size += PAD_MAP_IDX_POC * sizeof(void *); pu1_buf = (UWORD8 *)ps_dec->pv_map_ref_idx_to_poc_buf; pu1_buf += size * ps_dec->u2_cur_slice_num; ps_dec->ps_parse_cur_slice->ppv_map_ref_idx_to_poc = ( void *)pu1_buf; } if(ps_dec->u1_separate_parse) { ps_dec->ps_parse_cur_slice->pv_tu_coeff_data_start = ps_dec->pv_parse_tu_coeff_data; } else { ps_dec->pv_proc_tu_coeff_data = ps_dec->pv_parse_tu_coeff_data; } if(u1_slice_type == I_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= I_SLC_BIT; ret = ih264d_parse_islice(ps_dec, u2_first_mb_in_slice); if(ps_dec->i4_pic_type != B_SLICE && ps_dec->i4_pic_type != P_SLICE) ps_dec->i4_pic_type = I_SLICE; } else if(u1_slice_type == P_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= P_SLC_BIT; ret = ih264d_parse_pslice(ps_dec, u2_first_mb_in_slice); ps_dec->u1_pr_sl_type = u1_slice_type; if(ps_dec->i4_pic_type != B_SLICE) ps_dec->i4_pic_type = P_SLICE; } else if(u1_slice_type == B_SLICE) { ps_dec->ps_cur_pic->u4_pack_slc_typ |= B_SLC_BIT; ret = ih264d_parse_bslice(ps_dec, u2_first_mb_in_slice); ps_dec->u1_pr_sl_type = u1_slice_type; ps_dec->i4_pic_type = B_SLICE; } else return ERROR_INV_SLC_TYPE_T; if(ps_dec->u1_slice_header_done) { /* set to zero to indicate a valid slice has been decoded */ ps_dec->u1_first_slice_in_stream = 0; } if(ret != OK) return ret; /* storing last Mb X and MbY of the slice */ ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; /* End of Picture detection */ if(ps_dec->u2_total_mbs_coded >= (ps_seq->u2_max_mb_addr + 1)) { ps_dec->u1_pic_decode_done = 1; } { dec_err_status_t * ps_err = ps_dec->ps_dec_err_status; if((ps_err->u1_err_flag & REJECT_PB_PICS) && (ps_err->u1_cur_pic_type == PIC_TYPE_I)) { ps_err->u1_err_flag = ACCEPT_ALL_PICS; } } PRINT_BIN_BIT_RATIO(ps_dec) return ret; }
174,040
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void AppControllerImpl::LaunchApp(const std::string& app_id) { app_service_proxy_->Launch(app_id, ui::EventFlags::EF_NONE, apps::mojom::LaunchSource::kFromAppListGrid, display::kDefaultDisplayId); } Commit Message: Refactor the AppController implementation into a KeyedService. This is necessary to guarantee that the AppController will not outlive the AppServiceProxy, which could happen before during Profile destruction. Bug: 945427 Change-Id: I9e2089799e38d5a70a4a9aa66df5319113e7809e Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1542336 Reviewed-by: Michael Giuffrida <[email protected]> Commit-Queue: Lucas Tenório <[email protected]> Cr-Commit-Position: refs/heads/master@{#645122} CWE ID: CWE-416
void AppControllerService::LaunchApp(const std::string& app_id) { app_service_proxy_->Launch(app_id, ui::EventFlags::EF_NONE, apps::mojom::LaunchSource::kFromAppListGrid, display::kDefaultDisplayId); }
172,084
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) { int rc = 0; struct cifs_ses *ses = sess_data->ses; mutex_lock(&ses->server->srv_mutex); if (ses->server->sign && ses->server->ops->generate_signingkey) { rc = ses->server->ops->generate_signingkey(ses); kfree(ses->auth_key.response); ses->auth_key.response = NULL; if (rc) { cifs_dbg(FYI, "SMB3 session key generation failed\n"); mutex_unlock(&ses->server->srv_mutex); goto keygen_exit; } } if (!ses->server->session_estab) { ses->server->sequence_number = 0x2; ses->server->session_estab = true; } mutex_unlock(&ses->server->srv_mutex); cifs_dbg(FYI, "SMB2/3 session established successfully\n"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); keygen_exit: if (!ses->server->sign) { kfree(ses->auth_key.response); ses->auth_key.response = NULL; } return rc; } Commit Message: CIFS: Enable encryption during session setup phase In order to allow encryption on SMB connection we need to exchange a session key and generate encryption and decryption keys. Signed-off-by: Pavel Shilovsky <[email protected]> CWE ID: CWE-476
SMB2_sess_establish_session(struct SMB2_sess_data *sess_data) { int rc = 0; struct cifs_ses *ses = sess_data->ses; mutex_lock(&ses->server->srv_mutex); if (ses->server->ops->generate_signingkey) { rc = ses->server->ops->generate_signingkey(ses); if (rc) { cifs_dbg(FYI, "SMB3 session key generation failed\n"); mutex_unlock(&ses->server->srv_mutex); return rc; } } if (!ses->server->session_estab) { ses->server->sequence_number = 0x2; ses->server->session_estab = true; } mutex_unlock(&ses->server->srv_mutex); cifs_dbg(FYI, "SMB2/3 session established successfully\n"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); return rc; }
169,361
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void SimpleSoftOMXComponent::onPortEnable(OMX_U32 portIndex, bool enable) { CHECK_LT(portIndex, mPorts.size()); PortInfo *port = &mPorts.editItemAt(portIndex); CHECK_EQ((int)port->mTransition, (int)PortInfo::NONE); CHECK(port->mDef.bEnabled == !enable); if (!enable) { port->mDef.bEnabled = OMX_FALSE; port->mTransition = PortInfo::DISABLING; for (size_t i = 0; i < port->mBuffers.size(); ++i) { BufferInfo *buffer = &port->mBuffers.editItemAt(i); if (buffer->mOwnedByUs) { buffer->mOwnedByUs = false; if (port->mDef.eDir == OMX_DirInput) { notifyEmptyBufferDone(buffer->mHeader); } else { CHECK_EQ(port->mDef.eDir, OMX_DirOutput); notifyFillBufferDone(buffer->mHeader); } } } port->mQueue.clear(); } else { port->mTransition = PortInfo::ENABLING; } checkTransitions(); } Commit Message: omx: prevent input port enable/disable for software codecs Bug: 29421804 Change-Id: Iba1011e9af942a6dff7f659af769a51e3f5ba66f CWE ID: CWE-264
void SimpleSoftOMXComponent::onPortEnable(OMX_U32 portIndex, bool enable) { CHECK_LT(portIndex, mPorts.size()); PortInfo *port = &mPorts.editItemAt(portIndex); CHECK_EQ((int)port->mTransition, (int)PortInfo::NONE); CHECK(port->mDef.bEnabled == !enable); if (port->mDef.eDir != OMX_DirOutput) { ALOGE("Port enable/disable allowed only on output ports."); notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL); android_errorWriteLog(0x534e4554, "29421804"); return; } if (!enable) { port->mDef.bEnabled = OMX_FALSE; port->mTransition = PortInfo::DISABLING; for (size_t i = 0; i < port->mBuffers.size(); ++i) { BufferInfo *buffer = &port->mBuffers.editItemAt(i); if (buffer->mOwnedByUs) { buffer->mOwnedByUs = false; if (port->mDef.eDir == OMX_DirInput) { notifyEmptyBufferDone(buffer->mHeader); } else { CHECK_EQ(port->mDef.eDir, OMX_DirOutput); notifyFillBufferDone(buffer->mHeader); } } } port->mQueue.clear(); } else { port->mTransition = PortInfo::ENABLING; } checkTransitions(); }
173,416
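The record above boils down to an access check: the fix rejects enable/disable requests on anything that is not an output port before any buffer state is touched. Below is a minimal standalone C sketch of that check; the enum, function names, and return convention are invented for illustration and are not the real OpenMAX IL API.

#include <stdio.h>

/* Sketch of the direction check added in the fix: bail out before touching
 * any buffer state when the request targets a non-output port. */
enum demo_dir { DEMO_DIR_INPUT, DEMO_DIR_OUTPUT };

static int demo_on_port_enable(enum demo_dir dir, int enable)
{
    if (dir != DEMO_DIR_OUTPUT) {
        fprintf(stderr, "port enable/disable allowed only on output ports\n");
        return -1;                 /* caller raises an error event instead */
    }
    printf("%s output port\n", enable ? "enabling" : "disabling");
    return 0;
}

int main(void)
{
    demo_on_port_enable(DEMO_DIR_INPUT, 1);   /* rejected */
    demo_on_port_enable(DEMO_DIR_OUTPUT, 0);  /* allowed */
    return 0;
}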
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) { /* unprotected vars, we dont care of overwrites */ static u32 challenge_timestamp; static unsigned int challenge_count; struct tcp_sock *tp = tcp_sk(sk); u32 now; /* First check our per-socket dupack rate limit. */ if (tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDCHALLENGE, &tp->last_oow_ack_time)) return; /* Then check the check host-wide RFC 5961 rate limit. */ now = jiffies / HZ; if (now != challenge_timestamp) { challenge_timestamp = now; challenge_count = 0; } if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); tcp_send_ack(sk); } } Commit Message: tcp: make challenge acks less predictable Yue Cao claims that current host rate limiting of challenge ACKS (RFC 5961) could leak enough information to allow a patient attacker to hijack TCP sessions. He will soon provide details in an academic paper. This patch increases the default limit from 100 to 1000, and adds some randomization so that the attacker can no longer hijack sessions without spending a considerable amount of probes. Based on initial analysis and patch from Linus. Note that we also have per socket rate limiting, so it is tempting to remove the host limit in the future. v2: randomize the count of challenge acks per second, not the period. Fixes: 282f23c6ee34 ("tcp: implement RFC 5961 3.2") Reported-by: Yue Cao <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Suggested-by: Linus Torvalds <[email protected]> Cc: Yuchung Cheng <[email protected]> Cc: Neal Cardwell <[email protected]> Acked-by: Neal Cardwell <[email protected]> Acked-by: Yuchung Cheng <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-200
static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) { /* unprotected vars, we dont care of overwrites */ static u32 challenge_timestamp; static unsigned int challenge_count; struct tcp_sock *tp = tcp_sk(sk); u32 count, now; /* First check our per-socket dupack rate limit. */ if (tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDCHALLENGE, &tp->last_oow_ack_time)) return; /* Then check host-wide RFC 5961 rate limit. */ now = jiffies / HZ; if (now != challenge_timestamp) { u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1; challenge_timestamp = now; WRITE_ONCE(challenge_count, half + prandom_u32_max(sysctl_tcp_challenge_ack_limit)); } count = READ_ONCE(challenge_count); if (count > 0) { WRITE_ONCE(challenge_count, count - 1); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); tcp_send_ack(sk); } }
167,133
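The fix in the record above replaces the fixed per-second cap on challenge ACKs with a randomized budget: half the limit plus a random value below the limit, so an attacker can no longer infer exactly how many challenge ACKs were sent in a given second. Below is a small userspace C sketch of that arithmetic; rand() is a rough stand-in for the kernel's prandom helper and the function name is invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Per-second challenge-ACK budget drawn from [half, half + limit), mirroring
 * the half + prandom_u32_max(limit) expression in the fixed function. */
static unsigned int pick_challenge_budget(unsigned int limit)
{
    unsigned int half = (limit + 1) / 2;
    return half + (rand() % limit);   /* rand() is only a sketch-level RNG */
}

int main(void)
{
    srand((unsigned int)time(NULL));
    for (int i = 0; i < 5; i++)
        printf("budget for this second: %u\n", pick_challenge_budget(1000));
    return 0;
}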
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: isofs_export_encode_fh(struct inode *inode, __u32 *fh32, int *max_len, struct inode *parent) { struct iso_inode_info * ei = ISOFS_I(inode); int len = *max_len; int type = 1; __u16 *fh16 = (__u16*)fh32; /* * WARNING: max_len is 5 for NFSv2. Because of this * limitation, we use the lower 16 bits of fh32[1] to hold the * offset of the inode and the upper 16 bits of fh32[1] to * hold the offset of the parent. */ if (parent && (len < 5)) { *max_len = 5; return 255; } else if (len < 3) { *max_len = 3; return 255; } len = 3; fh32[0] = ei->i_iget5_block; fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */ fh32[2] = inode->i_generation; if (parent) { struct iso_inode_info *eparent; eparent = ISOFS_I(parent); fh32[3] = eparent->i_iget5_block; fh16[3] = (__u16)eparent->i_iget5_offset; /* fh16 [sic] */ fh32[4] = parent->i_generation; len = 5; type = 2; } *max_len = len; return type; } Commit Message: isofs: avoid info leak on export For type 1 the parent_offset member in struct isofs_fid gets copied uninitialized to userland. Fix this by initializing it to 0. Signed-off-by: Mathias Krause <[email protected]> Signed-off-by: Jan Kara <[email protected]> CWE ID: CWE-200
isofs_export_encode_fh(struct inode *inode, __u32 *fh32, int *max_len, struct inode *parent) { struct iso_inode_info * ei = ISOFS_I(inode); int len = *max_len; int type = 1; __u16 *fh16 = (__u16*)fh32; /* * WARNING: max_len is 5 for NFSv2. Because of this * limitation, we use the lower 16 bits of fh32[1] to hold the * offset of the inode and the upper 16 bits of fh32[1] to * hold the offset of the parent. */ if (parent && (len < 5)) { *max_len = 5; return 255; } else if (len < 3) { *max_len = 3; return 255; } len = 3; fh32[0] = ei->i_iget5_block; fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */ fh16[3] = 0; /* avoid leaking uninitialized data */ fh32[2] = inode->i_generation; if (parent) { struct iso_inode_info *eparent; eparent = ISOFS_I(parent); fh32[3] = eparent->i_iget5_block; fh16[3] = (__u16)eparent->i_iget5_offset; /* fh16 [sic] */ fh32[4] = parent->i_generation; len = 5; type = 2; } *max_len = len; return type; }
166,177
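The isofs record above is a classic CWE-200 pattern: a file handle copied to userspace carried an uninitialized 16-bit field for the shorter handle type, and the fix writes an explicit zero into it. Below is a standalone C sketch of the general defence of clearing an exported structure before filling only the fields a given case needs; the struct layout and names are invented for illustration and are not the real isofs_fid.

#include <string.h>
#include <stdio.h>

/* Sketch: any structure handed across a trust boundary is zeroed first, so
 * fields that a particular handle type leaves unused cannot leak stale data. */
struct demo_fid {
    unsigned int   block;
    unsigned short offset;
    unsigned short parent_offset;   /* unused for the shorter handle type */
    unsigned int   generation;
};

static void encode_demo_fid(struct demo_fid *fid, unsigned int block,
                            unsigned short offset, unsigned int generation)
{
    memset(fid, 0, sizeof(*fid));   /* nothing stale is copied out later */
    fid->block = block;
    fid->offset = offset;
    fid->generation = generation;
}

int main(void)
{
    struct demo_fid fid;
    encode_demo_fid(&fid, 42, 7, 1);
    printf("block=%u offset=%u parent_offset=%u gen=%u\n",
           fid.block, fid.offset, fid.parent_offset, fid.generation);
    return 0;
}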
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ChromeExtensionWebContentsObserver::RenderViewCreated( content::RenderViewHost* render_view_host) { ReloadIfTerminated(render_view_host); ExtensionWebContentsObserver::RenderViewCreated(render_view_host); } Commit Message: This patch implements a mechanism for more granular link URL permissions (filtering on scheme/host). This fixes the bug that allowed PDFs to have working links to any "chrome://" URLs. BUG=528505,226927 Review URL: https://codereview.chromium.org/1362433002 Cr-Commit-Position: refs/heads/master@{#351705} CWE ID: CWE-264
void ChromeExtensionWebContentsObserver::RenderViewCreated( content::RenderViewHost* render_view_host) { ReloadIfTerminated(render_view_host); ExtensionWebContentsObserver::RenderViewCreated(render_view_host); const Extension* extension = GetExtension(render_view_host); if (!extension) return; int process_id = render_view_host->GetProcess()->GetID(); auto policy = content::ChildProcessSecurityPolicy::GetInstance(); // Components of chrome that are implemented as extensions or platform apps // are allowed to use chrome://resources/ URLs. if ((extension->is_extension() || extension->is_platform_app()) && Manifest::IsComponentLocation(extension->location())) { policy->GrantOrigin(process_id, url::Origin(GURL(content::kChromeUIResourcesURL))); } // Extensions, legacy packaged apps, and component platform apps are allowed // to use chrome://favicon/ and chrome://extension-icon/ URLs. Hosted apps are // not allowed because they are served via web servers (and are generally // never given access to Chrome APIs). if (extension->is_extension() || extension->is_legacy_packaged_app() || (extension->is_platform_app() && Manifest::IsComponentLocation(extension->location()))) { policy->GrantOrigin(process_id, url::Origin(GURL(chrome::kChromeUIFaviconURL))); policy->GrantOrigin(process_id, url::Origin(GURL(chrome::kChromeUIExtensionIconURL))); } }
171,773
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: SampleTable::~SampleTable() { delete[] mSampleToChunkEntries; mSampleToChunkEntries = NULL; delete[] mSyncSamples; mSyncSamples = NULL; delete mCompositionDeltaLookup; mCompositionDeltaLookup = NULL; delete[] mCompositionTimeDeltaEntries; mCompositionTimeDeltaEntries = NULL; delete[] mSampleTimeEntries; mSampleTimeEntries = NULL; delete[] mTimeToSample; mTimeToSample = NULL; delete mSampleIterator; mSampleIterator = NULL; } Commit Message: Resolve merge conflict when cp'ing ag/931301 to mnc-mr1-release Change-Id: I079d1db2d30d126f8aed348bd62451acf741037d CWE ID: CWE-20
SampleTable::~SampleTable() { delete[] mSampleToChunkEntries; mSampleToChunkEntries = NULL; delete[] mSyncSamples; mSyncSamples = NULL; delete mCompositionDeltaLookup; mCompositionDeltaLookup = NULL; delete[] mCompositionTimeDeltaEntries; mCompositionTimeDeltaEntries = NULL; delete[] mSampleTimeEntries; mSampleTimeEntries = NULL; delete mSampleIterator; mSampleIterator = NULL; }
174,174
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: const BlockEntry* Cluster::GetEntry(const Track* pTrack, long long time_ns) const { assert(pTrack); if (m_pSegment == NULL) // this is the special EOS cluster return pTrack->GetEOS(); #if 0 LoadBlockEntries(); if ((m_entries == NULL) || (m_entries_count <= 0)) return NULL; //return EOS here? const BlockEntry* pResult = pTrack->GetEOS(); BlockEntry** i = m_entries; assert(i); BlockEntry** const j = i + m_entries_count; while (i != j) { const BlockEntry* const pEntry = *i++; assert(pEntry); assert(!pEntry->EOS()); const Block* const pBlock = pEntry->GetBlock(); assert(pBlock); if (pBlock->GetTrackNumber() != pTrack->GetNumber()) continue; if (pTrack->VetEntry(pEntry)) { if (time_ns < 0) //just want first candidate block return pEntry; const long long ns = pBlock->GetTime(this); if (ns > time_ns) break; pResult = pEntry; } else if (time_ns >= 0) { const long long ns = pBlock->GetTime(this); if (ns > time_ns) break; } } return pResult; #else const BlockEntry* pResult = pTrack->GetEOS(); long index = 0; for (;;) { if (index >= m_entries_count) { long long pos; long len; const long status = Parse(pos, len); assert(status >= 0); if (status > 0) // completely parsed, and no more entries return pResult; if (status < 0) // should never happen return 0; assert(m_entries); assert(index < m_entries_count); } const BlockEntry* const pEntry = m_entries[index]; assert(pEntry); assert(!pEntry->EOS()); const Block* const pBlock = pEntry->GetBlock(); assert(pBlock); if (pBlock->GetTrackNumber() != pTrack->GetNumber()) { ++index; continue; } if (pTrack->VetEntry(pEntry)) { if (time_ns < 0) // just want first candidate block return pEntry; const long long ns = pBlock->GetTime(this); if (ns > time_ns) return pResult; pResult = pEntry; // have a candidate } else if (time_ns >= 0) { const long long ns = pBlock->GetTime(this); if (ns > time_ns) return pResult; } ++index; } #endif } Commit Message: external/libvpx/libwebm: Update snapshot Update libwebm snapshot. This update contains security fixes from upstream. Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b BUG=23167726 Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207 (cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a) CWE ID: CWE-20
const BlockEntry* Cluster::GetEntry(const Track* pTrack, long long time_ns) const { assert(pTrack); if (m_pSegment == NULL) // this is the special EOS cluster return pTrack->GetEOS(); const BlockEntry* pResult = pTrack->GetEOS(); long index = 0; for (;;) { if (index >= m_entries_count) { long long pos; long len; const long status = Parse(pos, len); assert(status >= 0); if (status > 0) // completely parsed, and no more entries return pResult; if (status < 0) // should never happen return 0; assert(m_entries); assert(index < m_entries_count); } const BlockEntry* const pEntry = m_entries[index]; assert(pEntry); assert(!pEntry->EOS()); const Block* const pBlock = pEntry->GetBlock(); assert(pBlock); if (pBlock->GetTrackNumber() != pTrack->GetNumber()) { ++index; continue; } if (pTrack->VetEntry(pEntry)) { if (time_ns < 0) // just want first candidate block return pEntry; const long long ns = pBlock->GetTime(this); if (ns > time_ns) return pResult; pResult = pEntry; // have a candidate } else if (time_ns >= 0) { const long long ns = pBlock->GetTime(this); if (ns > time_ns) return pResult; } ++index; } }
173,816
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p, AVPacket *avpkt) { AVDictionary *metadata = NULL; uint32_t tag, length; int decode_next_dat = 0; int ret; for (;;) { length = bytestream2_get_bytes_left(&s->gb); if (length <= 0) { if (avctx->codec_id == AV_CODEC_ID_PNG && avctx->skip_frame == AVDISCARD_ALL) { av_frame_set_metadata(p, metadata); return 0; } if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && length == 0) { if (!(s->state & PNG_IDAT)) return 0; else goto exit_loop; } av_log(avctx, AV_LOG_ERROR, "%d bytes left\n", length); if ( s->state & PNG_ALLIMAGE && avctx->strict_std_compliance <= FF_COMPLIANCE_NORMAL) goto exit_loop; ret = AVERROR_INVALIDDATA; goto fail; } length = bytestream2_get_be32(&s->gb); if (length > 0x7fffffff || length > bytestream2_get_bytes_left(&s->gb)) { av_log(avctx, AV_LOG_ERROR, "chunk too big\n"); ret = AVERROR_INVALIDDATA; goto fail; } tag = bytestream2_get_le32(&s->gb); if (avctx->debug & FF_DEBUG_STARTCODE) av_log(avctx, AV_LOG_DEBUG, "png: tag=%c%c%c%c length=%u\n", (tag & 0xff), ((tag >> 8) & 0xff), ((tag >> 16) & 0xff), ((tag >> 24) & 0xff), length); if (avctx->codec_id == AV_CODEC_ID_PNG && avctx->skip_frame == AVDISCARD_ALL) { switch(tag) { case MKTAG('I', 'H', 'D', 'R'): case MKTAG('p', 'H', 'Y', 's'): case MKTAG('t', 'E', 'X', 't'): case MKTAG('I', 'D', 'A', 'T'): case MKTAG('t', 'R', 'N', 'S'): break; default: goto skip_tag; } } switch (tag) { case MKTAG('I', 'H', 'D', 'R'): if ((ret = decode_ihdr_chunk(avctx, s, length)) < 0) goto fail; break; case MKTAG('p', 'H', 'Y', 's'): if ((ret = decode_phys_chunk(avctx, s)) < 0) goto fail; break; case MKTAG('f', 'c', 'T', 'L'): if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG) goto skip_tag; if ((ret = decode_fctl_chunk(avctx, s, length)) < 0) goto fail; decode_next_dat = 1; break; case MKTAG('f', 'd', 'A', 'T'): if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG) goto skip_tag; if (!decode_next_dat) { ret = AVERROR_INVALIDDATA; goto fail; } bytestream2_get_be32(&s->gb); length -= 4; /* fallthrough */ case MKTAG('I', 'D', 'A', 'T'): if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && !decode_next_dat) goto skip_tag; if ((ret = decode_idat_chunk(avctx, s, length, p)) < 0) goto fail; break; case MKTAG('P', 'L', 'T', 'E'): if (decode_plte_chunk(avctx, s, length) < 0) goto skip_tag; break; case MKTAG('t', 'R', 'N', 'S'): if (decode_trns_chunk(avctx, s, length) < 0) goto skip_tag; break; case MKTAG('t', 'E', 'X', 't'): if (decode_text_chunk(s, length, 0, &metadata) < 0) av_log(avctx, AV_LOG_WARNING, "Broken tEXt chunk\n"); bytestream2_skip(&s->gb, length + 4); break; case MKTAG('z', 'T', 'X', 't'): if (decode_text_chunk(s, length, 1, &metadata) < 0) av_log(avctx, AV_LOG_WARNING, "Broken zTXt chunk\n"); bytestream2_skip(&s->gb, length + 4); break; case MKTAG('s', 'T', 'E', 'R'): { int mode = bytestream2_get_byte(&s->gb); AVStereo3D *stereo3d = av_stereo3d_create_side_data(p); if (!stereo3d) goto fail; if (mode == 0 || mode == 1) { stereo3d->type = AV_STEREO3D_SIDEBYSIDE; stereo3d->flags = mode ? 
0 : AV_STEREO3D_FLAG_INVERT; } else { av_log(avctx, AV_LOG_WARNING, "Unknown value in sTER chunk (%d)\n", mode); } bytestream2_skip(&s->gb, 4); /* crc */ break; } case MKTAG('I', 'E', 'N', 'D'): if (!(s->state & PNG_ALLIMAGE)) av_log(avctx, AV_LOG_ERROR, "IEND without all image\n"); if (!(s->state & (PNG_ALLIMAGE|PNG_IDAT))) { ret = AVERROR_INVALIDDATA; goto fail; } bytestream2_skip(&s->gb, 4); /* crc */ goto exit_loop; default: /* skip tag */ skip_tag: bytestream2_skip(&s->gb, length + 4); break; } } exit_loop: if (avctx->codec_id == AV_CODEC_ID_PNG && avctx->skip_frame == AVDISCARD_ALL) { av_frame_set_metadata(p, metadata); return 0; } if (s->bits_per_pixel <= 4) handle_small_bpp(s, p); /* apply transparency if needed */ if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) { size_t byte_depth = s->bit_depth > 8 ? 2 : 1; size_t raw_bpp = s->bpp - byte_depth; unsigned x, y; for (y = 0; y < s->height; ++y) { uint8_t *row = &s->image_buf[s->image_linesize * y]; /* since we're updating in-place, we have to go from right to left */ for (x = s->width; x > 0; --x) { uint8_t *pixel = &row[s->bpp * (x - 1)]; memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp); if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) { memset(&pixel[raw_bpp], 0, byte_depth); } else { memset(&pixel[raw_bpp], 0xff, byte_depth); } } } } /* handle P-frames only if a predecessor frame is available */ if (s->last_picture.f->data[0]) { if ( !(avpkt->flags & AV_PKT_FLAG_KEY) && avctx->codec_tag != AV_RL32("MPNG") && s->last_picture.f->width == p->width && s->last_picture.f->height== p->height && s->last_picture.f->format== p->format ) { if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG) handle_p_frame_png(s, p); else if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && (ret = handle_p_frame_apng(avctx, s, p)) < 0) goto fail; } } ff_thread_report_progress(&s->picture, INT_MAX, 0); ff_thread_report_progress(&s->previous_picture, INT_MAX, 0); av_frame_set_metadata(p, metadata); metadata = NULL; return 0; fail: av_dict_free(&metadata); ff_thread_report_progress(&s->picture, INT_MAX, 0); ff_thread_report_progress(&s->previous_picture, INT_MAX, 0); return ret; } Commit Message: avcodec/pngdec: Check trns more completely Fixes out of array access Fixes: 546/clusterfuzz-testcase-4809433909559296 Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg Signed-off-by: Michael Niedermayer <[email protected]> CWE ID: CWE-787
static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p, AVPacket *avpkt) { AVDictionary *metadata = NULL; uint32_t tag, length; int decode_next_dat = 0; int ret; for (;;) { length = bytestream2_get_bytes_left(&s->gb); if (length <= 0) { if (avctx->codec_id == AV_CODEC_ID_PNG && avctx->skip_frame == AVDISCARD_ALL) { av_frame_set_metadata(p, metadata); return 0; } if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && length == 0) { if (!(s->state & PNG_IDAT)) return 0; else goto exit_loop; } av_log(avctx, AV_LOG_ERROR, "%d bytes left\n", length); if ( s->state & PNG_ALLIMAGE && avctx->strict_std_compliance <= FF_COMPLIANCE_NORMAL) goto exit_loop; ret = AVERROR_INVALIDDATA; goto fail; } length = bytestream2_get_be32(&s->gb); if (length > 0x7fffffff || length > bytestream2_get_bytes_left(&s->gb)) { av_log(avctx, AV_LOG_ERROR, "chunk too big\n"); ret = AVERROR_INVALIDDATA; goto fail; } tag = bytestream2_get_le32(&s->gb); if (avctx->debug & FF_DEBUG_STARTCODE) av_log(avctx, AV_LOG_DEBUG, "png: tag=%c%c%c%c length=%u\n", (tag & 0xff), ((tag >> 8) & 0xff), ((tag >> 16) & 0xff), ((tag >> 24) & 0xff), length); if (avctx->codec_id == AV_CODEC_ID_PNG && avctx->skip_frame == AVDISCARD_ALL) { switch(tag) { case MKTAG('I', 'H', 'D', 'R'): case MKTAG('p', 'H', 'Y', 's'): case MKTAG('t', 'E', 'X', 't'): case MKTAG('I', 'D', 'A', 'T'): case MKTAG('t', 'R', 'N', 'S'): break; default: goto skip_tag; } } switch (tag) { case MKTAG('I', 'H', 'D', 'R'): if ((ret = decode_ihdr_chunk(avctx, s, length)) < 0) goto fail; break; case MKTAG('p', 'H', 'Y', 's'): if ((ret = decode_phys_chunk(avctx, s)) < 0) goto fail; break; case MKTAG('f', 'c', 'T', 'L'): if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG) goto skip_tag; if ((ret = decode_fctl_chunk(avctx, s, length)) < 0) goto fail; decode_next_dat = 1; break; case MKTAG('f', 'd', 'A', 'T'): if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG) goto skip_tag; if (!decode_next_dat) { ret = AVERROR_INVALIDDATA; goto fail; } bytestream2_get_be32(&s->gb); length -= 4; /* fallthrough */ case MKTAG('I', 'D', 'A', 'T'): if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && !decode_next_dat) goto skip_tag; if ((ret = decode_idat_chunk(avctx, s, length, p)) < 0) goto fail; break; case MKTAG('P', 'L', 'T', 'E'): if (decode_plte_chunk(avctx, s, length) < 0) goto skip_tag; break; case MKTAG('t', 'R', 'N', 'S'): if (decode_trns_chunk(avctx, s, length) < 0) goto skip_tag; break; case MKTAG('t', 'E', 'X', 't'): if (decode_text_chunk(s, length, 0, &metadata) < 0) av_log(avctx, AV_LOG_WARNING, "Broken tEXt chunk\n"); bytestream2_skip(&s->gb, length + 4); break; case MKTAG('z', 'T', 'X', 't'): if (decode_text_chunk(s, length, 1, &metadata) < 0) av_log(avctx, AV_LOG_WARNING, "Broken zTXt chunk\n"); bytestream2_skip(&s->gb, length + 4); break; case MKTAG('s', 'T', 'E', 'R'): { int mode = bytestream2_get_byte(&s->gb); AVStereo3D *stereo3d = av_stereo3d_create_side_data(p); if (!stereo3d) goto fail; if (mode == 0 || mode == 1) { stereo3d->type = AV_STEREO3D_SIDEBYSIDE; stereo3d->flags = mode ? 
0 : AV_STEREO3D_FLAG_INVERT; } else { av_log(avctx, AV_LOG_WARNING, "Unknown value in sTER chunk (%d)\n", mode); } bytestream2_skip(&s->gb, 4); /* crc */ break; } case MKTAG('I', 'E', 'N', 'D'): if (!(s->state & PNG_ALLIMAGE)) av_log(avctx, AV_LOG_ERROR, "IEND without all image\n"); if (!(s->state & (PNG_ALLIMAGE|PNG_IDAT))) { ret = AVERROR_INVALIDDATA; goto fail; } bytestream2_skip(&s->gb, 4); /* crc */ goto exit_loop; default: /* skip tag */ skip_tag: bytestream2_skip(&s->gb, length + 4); break; } } exit_loop: if (avctx->codec_id == AV_CODEC_ID_PNG && avctx->skip_frame == AVDISCARD_ALL) { av_frame_set_metadata(p, metadata); return 0; } if (s->bits_per_pixel <= 4) handle_small_bpp(s, p); /* apply transparency if needed */ if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) { size_t byte_depth = s->bit_depth > 8 ? 2 : 1; size_t raw_bpp = s->bpp - byte_depth; unsigned x, y; av_assert0(s->bit_depth > 1); for (y = 0; y < s->height; ++y) { uint8_t *row = &s->image_buf[s->image_linesize * y]; /* since we're updating in-place, we have to go from right to left */ for (x = s->width; x > 0; --x) { uint8_t *pixel = &row[s->bpp * (x - 1)]; memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp); if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) { memset(&pixel[raw_bpp], 0, byte_depth); } else { memset(&pixel[raw_bpp], 0xff, byte_depth); } } } } /* handle P-frames only if a predecessor frame is available */ if (s->last_picture.f->data[0]) { if ( !(avpkt->flags & AV_PKT_FLAG_KEY) && avctx->codec_tag != AV_RL32("MPNG") && s->last_picture.f->width == p->width && s->last_picture.f->height== p->height && s->last_picture.f->format== p->format ) { if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG) handle_p_frame_png(s, p); else if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && (ret = handle_p_frame_apng(avctx, s, p)) < 0) goto fail; } } ff_thread_report_progress(&s->picture, INT_MAX, 0); ff_thread_report_progress(&s->previous_picture, INT_MAX, 0); av_frame_set_metadata(p, metadata); metadata = NULL; return 0; fail: av_dict_free(&metadata); ff_thread_report_progress(&s->picture, INT_MAX, 0); ff_thread_report_progress(&s->previous_picture, INT_MAX, 0); return ret; }
168,247
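The PNG record above guards the in-place transparency rewrite with an assertion on the bit depth so the per-pixel arithmetic cannot degenerate before the row is rewritten. Below is a tiny C sketch of that style of invariant check ahead of pointer math; the numbers and names are illustrative and do not reproduce the decoder's real bpp bookkeeping.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Sketch: verify the per-pixel byte count leaves at least one colour byte
 * before it is used to size in-place compares and moves over each row. */
static size_t opaque_bytes(size_t bpp, size_t byte_depth)
{
    assert(bpp > byte_depth);   /* keep the "raw" pixel width positive */
    return bpp - byte_depth;
}

int main(void)
{
    printf("raw bytes per pixel: %zu\n", opaque_bytes(4, 1));
    /* opaque_bytes(1, 1) would trip the assert instead of yielding 0 and
     * letting the in-place row rewrite touch the wrong bytes */
    return 0;
}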
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static long restore_tm_sigcontexts(struct pt_regs *regs, struct sigcontext __user *sc, struct sigcontext __user *tm_sc) { #ifdef CONFIG_ALTIVEC elf_vrreg_t __user *v_regs, *tm_v_regs; #endif unsigned long err = 0; unsigned long msr; #ifdef CONFIG_VSX int i; #endif /* copy the GPRs */ err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr)); err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs, sizeof(regs->gpr)); /* * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP. * TEXASR was set by the signal delivery reclaim, as was TFIAR. * Users doing anything abhorrent like thread-switching w/ signals for * TM-Suspended code will have to back TEXASR/TFIAR up themselves. * For the case of getting a signal and simply returning from it, * we don't need to re-copy them here. */ err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]); err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]); /* get MSR separately, transfer the LE bit if doing signal return */ err |= __get_user(msr, &sc->gp_regs[PT_MSR]); /* pull in MSR TM from user context */ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); /* pull in MSR LE from user context */ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); /* The following non-GPR non-FPR non-VR state is also checkpointed: */ err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]); err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]); err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]); err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]); err |= __get_user(current->thread.ckpt_regs.ctr, &sc->gp_regs[PT_CTR]); err |= __get_user(current->thread.ckpt_regs.link, &sc->gp_regs[PT_LNK]); err |= __get_user(current->thread.ckpt_regs.xer, &sc->gp_regs[PT_XER]); err |= __get_user(current->thread.ckpt_regs.ccr, &sc->gp_regs[PT_CCR]); /* These regs are not checkpointed; they can go in 'regs'. */ err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]); err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); /* * Do this before updating the thread state in * current->thread.fpr/vr. That way, if we get preempted * and another task grabs the FPU/Altivec, it won't be * tempted to save the current CPU state into the thread_struct * and corrupt what we are writing there. */ discard_lazy_cpu_state(); /* * Force reload of FP/VEC. * This has to be done before copying stuff into current->thread.fpr/vr * for the reasons explained in the previous comment. 
*/ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX); #ifdef CONFIG_ALTIVEC err |= __get_user(v_regs, &sc->v_regs); err |= __get_user(tm_v_regs, &tm_sc->v_regs); if (err) return err; if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128))) return -EFAULT; if (tm_v_regs && !access_ok(VERIFY_READ, tm_v_regs, 34 * sizeof(vector128))) return -EFAULT; /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) { err |= __copy_from_user(&current->thread.vr_state, v_regs, 33 * sizeof(vector128)); err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs, 33 * sizeof(vector128)); } else if (current->thread.used_vr) { memset(&current->thread.vr_state, 0, 33 * sizeof(vector128)); memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128)); } /* Always get VRSAVE back */ if (v_regs != NULL && tm_v_regs != NULL) { err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); err |= __get_user(current->thread.transact_vrsave, (u32 __user *)&tm_v_regs[33]); } else { current->thread.vrsave = 0; current->thread.transact_vrsave = 0; } if (cpu_has_feature(CPU_FTR_ALTIVEC)) mtspr(SPRN_VRSAVE, current->thread.vrsave); #endif /* CONFIG_ALTIVEC */ /* restore floating point */ err |= copy_fpr_from_user(current, &sc->fp_regs); err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs); #ifdef CONFIG_VSX /* * Get additional VSX data. Update v_regs to point after the * VMX data. Copy VSX low doubleword from userspace to local * buffer for formatting, then into the taskstruct. */ if (v_regs && ((msr & MSR_VSX) != 0)) { v_regs += ELF_NVRREG; tm_v_regs += ELF_NVRREG; err |= copy_vsx_from_user(current, v_regs); err |= copy_transact_vsx_from_user(current, tm_v_regs); } else { for (i = 0; i < 32 ; i++) { current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; } } #endif tm_enable(); /* Make sure the transaction is marked as failed */ current->thread.tm_texasr |= TEXASR_FS; /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(&current->thread, msr); /* This loads the speculative FP/VEC state, if used */ if (msr & MSR_FP) { do_load_up_transact_fpu(&current->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } #ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(&current->thread); regs->msr |= MSR_VEC; } #endif return err; } Commit Message: powerpc/tm: Block signal return setting invalid MSR state Currently we allow both the MSR T and S bits to be set by userspace on a signal return. Unfortunately this is a reserved configuration and will cause a TM Bad Thing exception if attempted (via rfid). This patch checks for this case in both the 32 and 64 bit signals code. If both T and S are set, we mark the context as invalid. Found using a syscall fuzzer. Fixes: 2b0a576d15e0 ("powerpc: Add new transactional memory state to the signal context") Cc: [email protected] # v3.9+ Signed-off-by: Michael Neuling <[email protected]> Signed-off-by: Michael Ellerman <[email protected]> CWE ID: CWE-20
static long restore_tm_sigcontexts(struct pt_regs *regs, struct sigcontext __user *sc, struct sigcontext __user *tm_sc) { #ifdef CONFIG_ALTIVEC elf_vrreg_t __user *v_regs, *tm_v_regs; #endif unsigned long err = 0; unsigned long msr; #ifdef CONFIG_VSX int i; #endif /* copy the GPRs */ err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr)); err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs, sizeof(regs->gpr)); /* * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP. * TEXASR was set by the signal delivery reclaim, as was TFIAR. * Users doing anything abhorrent like thread-switching w/ signals for * TM-Suspended code will have to back TEXASR/TFIAR up themselves. * For the case of getting a signal and simply returning from it, * we don't need to re-copy them here. */ err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]); err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]); /* get MSR separately, transfer the LE bit if doing signal return */ err |= __get_user(msr, &sc->gp_regs[PT_MSR]); /* Don't allow reserved mode. */ if (MSR_TM_RESV(msr)) return -EINVAL; /* pull in MSR TM from user context */ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); /* pull in MSR LE from user context */ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); /* The following non-GPR non-FPR non-VR state is also checkpointed: */ err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]); err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]); err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]); err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]); err |= __get_user(current->thread.ckpt_regs.ctr, &sc->gp_regs[PT_CTR]); err |= __get_user(current->thread.ckpt_regs.link, &sc->gp_regs[PT_LNK]); err |= __get_user(current->thread.ckpt_regs.xer, &sc->gp_regs[PT_XER]); err |= __get_user(current->thread.ckpt_regs.ccr, &sc->gp_regs[PT_CCR]); /* These regs are not checkpointed; they can go in 'regs'. */ err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]); err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]); err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]); err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]); /* * Do this before updating the thread state in * current->thread.fpr/vr. That way, if we get preempted * and another task grabs the FPU/Altivec, it won't be * tempted to save the current CPU state into the thread_struct * and corrupt what we are writing there. */ discard_lazy_cpu_state(); /* * Force reload of FP/VEC. * This has to be done before copying stuff into current->thread.fpr/vr * for the reasons explained in the previous comment. 
*/ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX); #ifdef CONFIG_ALTIVEC err |= __get_user(v_regs, &sc->v_regs); err |= __get_user(tm_v_regs, &tm_sc->v_regs); if (err) return err; if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128))) return -EFAULT; if (tm_v_regs && !access_ok(VERIFY_READ, tm_v_regs, 34 * sizeof(vector128))) return -EFAULT; /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) { err |= __copy_from_user(&current->thread.vr_state, v_regs, 33 * sizeof(vector128)); err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs, 33 * sizeof(vector128)); } else if (current->thread.used_vr) { memset(&current->thread.vr_state, 0, 33 * sizeof(vector128)); memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128)); } /* Always get VRSAVE back */ if (v_regs != NULL && tm_v_regs != NULL) { err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); err |= __get_user(current->thread.transact_vrsave, (u32 __user *)&tm_v_regs[33]); } else { current->thread.vrsave = 0; current->thread.transact_vrsave = 0; } if (cpu_has_feature(CPU_FTR_ALTIVEC)) mtspr(SPRN_VRSAVE, current->thread.vrsave); #endif /* CONFIG_ALTIVEC */ /* restore floating point */ err |= copy_fpr_from_user(current, &sc->fp_regs); err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs); #ifdef CONFIG_VSX /* * Get additional VSX data. Update v_regs to point after the * VMX data. Copy VSX low doubleword from userspace to local * buffer for formatting, then into the taskstruct. */ if (v_regs && ((msr & MSR_VSX) != 0)) { v_regs += ELF_NVRREG; tm_v_regs += ELF_NVRREG; err |= copy_vsx_from_user(current, v_regs); err |= copy_transact_vsx_from_user(current, tm_v_regs); } else { for (i = 0; i < 32 ; i++) { current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; } } #endif tm_enable(); /* Make sure the transaction is marked as failed */ current->thread.tm_texasr |= TEXASR_FS; /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(&current->thread, msr); /* This loads the speculative FP/VEC state, if used */ if (msr & MSR_FP) { do_load_up_transact_fpu(&current->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } #ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(&current->thread); regs->msr |= MSR_VEC; } #endif return err; }
167,482
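The only functional change in the fixed restore_tm_sigcontexts() above is an early guard: the MSR value taken from the user-supplied sigcontext is checked before it is merged into the register state, and the reserved combination of both transaction-state bits is rejected with -EINVAL. A minimal standalone sketch of that validate-before-apply pattern follows; the macro names, bit positions and function are invented for illustration and are not the real powerpc definitions.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; not the real powerpc MSR layout. */
#define MSR_TS_T    (1u << 1)            /* "transactional" state bit */
#define MSR_TS_S    (1u << 2)            /* "suspended" state bit */
#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S)

/* Validate user-supplied machine-state bits before installing them: the
 * combination with both transaction bits set is reserved and is refused
 * instead of being written into the register image. */
static int restore_user_msr(uint32_t user_msr, uint32_t *kernel_msr)
{
    if ((user_msr & MSR_TS_MASK) == MSR_TS_MASK)
        return -EINVAL;                  /* reserved state: refuse it */
    *kernel_msr = (*kernel_msr & ~MSR_TS_MASK) | (user_msr & MSR_TS_MASK);
    return 0;
}

int main(void)
{
    uint32_t msr = 0;
    printf("suspended only: %d\n", restore_user_msr(MSR_TS_S, &msr));
    printf("reserved combo: %d\n", restore_user_msr(MSR_TS_MASK, &msr));
    return 0;
}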
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: rdpsnddbg_process(STREAM s) { unsigned int pkglen; static char *rest = NULL; char *buf; pkglen = s->end - s->p; /* str_handle_lines requires null terminated strings */ buf = (char *) xmalloc(pkglen + 1); STRNCPY(buf, (char *) s->p, pkglen + 1); str_handle_lines(buf, &rest, rdpsnddbg_line_handler, NULL); xfree(buf); } Commit Message: Malicious RDP server security fixes This commit includes fixes for a set of 21 vulnerabilities in rdesktop when a malicious RDP server is used. All vulnerabilities was identified and reported by Eyal Itkin. * Add rdp_protocol_error function that is used in several fixes * Refactor of process_bitmap_updates * Fix possible integer overflow in s_check_rem() on 32bit arch * Fix memory corruption in process_bitmap_data - CVE-2018-8794 * Fix remote code execution in process_bitmap_data - CVE-2018-8795 * Fix remote code execution in process_plane - CVE-2018-8797 * Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175 * Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175 * Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176 * Fix Denial of Service in sec_recv - CVE-2018-20176 * Fix minor information leak in rdpdr_process - CVE-2018-8791 * Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792 * Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793 * Fix Denial of Service in process_bitmap_data - CVE-2018-8796 * Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798 * Fix Denial of Service in process_secondary_order - CVE-2018-8799 * Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800 * Fix major information leak in ui_clip_handle_data - CVE-2018-20174 * Fix memory corruption in rdp_in_unistr - CVE-2018-20177 * Fix Denial of Service in process_demand_active - CVE-2018-20178 * Fix remote code execution in lspci_process - CVE-2018-20179 * Fix remote code execution in rdpsnddbg_process - CVE-2018-20180 * Fix remote code execution in seamless_process - CVE-2018-20181 * Fix remote code execution in seamless_process_line - CVE-2018-20182 CWE ID: CWE-119
rdpsnddbg_process(STREAM s) { unsigned int pkglen; static char *rest = NULL; char *buf; if (!s_check(s)) { rdp_protocol_error("rdpsnddbg_process(), stream is in unstable state", s); } pkglen = s->end - s->p; /* str_handle_lines requires null terminated strings */ buf = (char *) xmalloc(pkglen + 1); STRNCPY(buf, (char *) s->p, pkglen + 1); str_handle_lines(buf, &rest, rdpsnddbg_line_handler, NULL); xfree(buf); }
169,807
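The rdpsnddbg_process() fix above inserts an s_check() call so the stream's pointers are validated before pkglen is computed from them. Below is a rough standalone sketch of the same check-before-pointer-arithmetic idea; the struct and function names are made up and far simpler than rdesktop's real STREAM type.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for a protocol stream; the real type has more fields. */
struct stream {
    unsigned char *data;  /* start of buffer */
    unsigned char *p;     /* current read pointer */
    unsigned char *end;   /* one past the last valid byte */
};

/* Rough equivalent of s_check(): the read pointer must lie inside the buffer. */
static int stream_ok(const struct stream *s)
{
    return s->p >= s->data && s->p <= s->end;
}

/* Copy the remaining payload into a NUL-terminated string, but only after
 * validating the stream, so the pointer subtraction below cannot go negative. */
static char *remaining_as_string(const struct stream *s)
{
    size_t len;
    char *buf;

    if (!stream_ok(s)) {
        fprintf(stderr, "protocol error: stream in unstable state\n");
        return NULL;
    }
    len = (size_t)(s->end - s->p);   /* safe: p <= end was just checked */
    buf = malloc(len + 1);
    if (buf == NULL)
        return NULL;
    memcpy(buf, s->p, len);
    buf[len] = '\0';
    return buf;
}

int main(void)
{
    unsigned char payload[] = "debug line";
    struct stream s = { payload, payload, payload + sizeof(payload) - 1 };
    char *text = remaining_as_string(&s);
    if (text != NULL) {
        printf("%s\n", text);
        free(text);
    }
    return 0;
}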
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, struct udphdr *uh) { struct udp_offload_priv *uo_priv; struct sk_buff *p, **pp = NULL; struct udphdr *uh2; unsigned int off = skb_gro_offset(skb); int flush = 1; if (NAPI_GRO_CB(skb)->udp_mark || (skb->ip_summed != CHECKSUM_PARTIAL && NAPI_GRO_CB(skb)->csum_cnt == 0 && !NAPI_GRO_CB(skb)->csum_valid)) goto out; /* mark that this skb passed once through the udp gro layer */ NAPI_GRO_CB(skb)->udp_mark = 1; rcu_read_lock(); uo_priv = rcu_dereference(udp_offload_base); for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) && uo_priv->offload->port == uh->dest && uo_priv->offload->callbacks.gro_receive) goto unflush; } goto out_unlock; unflush: flush = 0; for (p = *head; p; p = p->next) { if (!NAPI_GRO_CB(p)->same_flow) continue; uh2 = (struct udphdr *)(p->data + off); /* Match ports and either checksums are either both zero * or nonzero. */ if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) || (!uh->check ^ !uh2->check)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; pp = uo_priv->offload->callbacks.gro_receive(head, skb, uo_priv->offload); out_unlock: rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; return pp; } Commit Message: tunnels: Don't apply GRO to multiple layers of encapsulation. When drivers express support for TSO of encapsulated packets, they only mean that they can do it for one layer of encapsulation. Supporting additional levels would mean updating, at a minimum, more IP length fields and they are unaware of this. No encapsulation device expresses support for handling offloaded encapsulated packets, so we won't generate these types of frames in the transmit path. However, GRO doesn't have a check for multiple levels of encapsulation and will attempt to build them. UDP tunnel GRO actually does prevent this situation but it only handles multiple UDP tunnels stacked on top of each other. This generalizes that solution to prevent any kind of tunnel stacking that would cause problems. Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack") Signed-off-by: Jesse Gross <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-400
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, struct udphdr *uh) { struct udp_offload_priv *uo_priv; struct sk_buff *p, **pp = NULL; struct udphdr *uh2; unsigned int off = skb_gro_offset(skb); int flush = 1; if (NAPI_GRO_CB(skb)->encap_mark || (skb->ip_summed != CHECKSUM_PARTIAL && NAPI_GRO_CB(skb)->csum_cnt == 0 && !NAPI_GRO_CB(skb)->csum_valid)) goto out; /* mark that this skb passed once through the tunnel gro layer */ NAPI_GRO_CB(skb)->encap_mark = 1; rcu_read_lock(); uo_priv = rcu_dereference(udp_offload_base); for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) && uo_priv->offload->port == uh->dest && uo_priv->offload->callbacks.gro_receive) goto unflush; } goto out_unlock; unflush: flush = 0; for (p = *head; p; p = p->next) { if (!NAPI_GRO_CB(p)->same_flow) continue; uh2 = (struct udphdr *)(p->data + off); /* Match ports and either checksums are either both zero * or nonzero. */ if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) || (!uh->check ^ !uh2->check)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; pp = uo_priv->offload->callbacks.gro_receive(head, skb, uo_priv->offload); out_unlock: rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; return pp; }
166,907
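The udp_gro_receive() change above is mostly a rename with a wider meaning: the per-packet mark that previously said "a UDP tunnel already saw this" now says "some encapsulation layer already saw this", so GRO never aggregates across more than one tunnel layer. The sketch below shows that flag pattern in isolation; the types and names are invented rather than the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Toy packet descriptor; the real kernel keeps this flag in NAPI_GRO_CB(). */
struct pkt {
    bool encap_mark;   /* set once any tunnel layer has handled this packet */
    int  payload;
};

/* A tunnel-layer receive hook: it refuses to aggregate a packet that has
 * already passed through another encapsulation layer, so at most one level
 * of tunnel aggregation is ever built. */
static bool tunnel_gro_receive(struct pkt *p)
{
    if (p->encap_mark)
        return false;          /* nested encapsulation: leave it alone */
    p->encap_mark = true;
    /* ... normal single-layer aggregation would happen here ... */
    return true;
}

int main(void)
{
    struct pkt p = { false, 42 };
    printf("outer layer aggregated: %d\n", tunnel_gro_receive(&p));
    printf("inner layer aggregated: %d\n", tunnel_gro_receive(&p));
    return 0;
}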
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: long ssl_get_algorithm2(SSL *s) { long alg2 = s->s3->tmp.new_cipher->algorithm2; if (TLS1_get_version(s) >= TLS1_2_VERSION && alg2 == (SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF)) return SSL_HANDSHAKE_MAC_SHA256 | TLS1_PRF_SHA256; return alg2; } Commit Message: CWE ID: CWE-310
long ssl_get_algorithm2(SSL *s) { long alg2 = s->s3->tmp.new_cipher->algorithm2; if (s->method->version == TLS1_2_VERSION && alg2 == (SSL_HANDSHAKE_MAC_DEFAULT|TLS1_PRF)) return SSL_HANDSHAKE_MAC_SHA256 | TLS1_PRF_SHA256; return alg2; }
164,567
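The ssl_get_algorithm2() fix above replaces a "version is at least TLS 1.2" test with an exact comparison against the method in use, so the SHA-256 handshake PRF can only be selected when the negotiated method really is TLS 1.2. The tiny sketch below restates that select-by-exact-version idea with invented constants; it is not OpenSSL code.

#include <stdio.h>

/* Illustrative version codes and PRF identifiers; not OpenSSL's real values. */
enum { TLS1_1_VERSION = 0x0302, TLS1_2_VERSION = 0x0303 };
enum { PRF_DEFAULT = 1, PRF_SHA256 = 2 };

/* Select the handshake PRF strictly from the protocol method actually in
 * use, rather than from a derived value that can disagree with it while a
 * handshake is still in progress. */
static int handshake_prf(int method_version)
{
    return (method_version == TLS1_2_VERSION) ? PRF_SHA256 : PRF_DEFAULT;
}

int main(void)
{
    printf("TLS 1.1 -> PRF %d\n", handshake_prf(TLS1_1_VERSION));
    printf("TLS 1.2 -> PRF %d\n", handshake_prf(TLS1_2_VERSION));
    return 0;
}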
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool asn1_read_BOOLEAN_context(struct asn1_data *data, bool *v, int context) { uint8_t tmp = 0; asn1_start_tag(data, ASN1_CONTEXT_SIMPLE(context)); asn1_read_uint8(data, &tmp); if (tmp == 0xFF) { *v = true; } else { *v = false; } asn1_end_tag(data); return !data->has_error; } Commit Message: CWE ID: CWE-399
bool asn1_read_BOOLEAN_context(struct asn1_data *data, bool *v, int context) { uint8_t tmp = 0; if (!asn1_start_tag(data, ASN1_CONTEXT_SIMPLE(context))) return false; *v = false; if (!asn1_read_uint8(data, &tmp)) return false; if (tmp == 0xFF) { *v = true; } return asn1_end_tag(data); }
164,584
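The asn1_read_BOOLEAN_context() fix above checks the result of every parsing step and gives *v a defined value even on failure, instead of pressing on after a failed read. The sketch below shows that propagate-every-error shape on a deliberately simplified two-byte encoding (tag then value); real ASN.1 also carries a length octet, which is omitted here, and all names are invented.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy byte reader standing in for the ASN.1 stream. */
struct reader {
    const uint8_t *buf;
    size_t len, pos;
};

static bool read_u8(struct reader *r, uint8_t *out)
{
    if (r->pos >= r->len)
        return false;            /* truncated input: report, don't guess */
    *out = r->buf[r->pos++];
    return true;
}

/* Read a toy BOOLEAN (tag 0x01 followed by one value byte), checking every
 * step and leaving *v in a defined state even when a step fails. */
static bool read_boolean(struct reader *r, bool *v)
{
    uint8_t tag, val;

    *v = false;                          /* defined value even on failure */
    if (!read_u8(r, &tag) || tag != 0x01)
        return false;
    if (!read_u8(r, &val))
        return false;
    *v = (val == 0xFF);
    return true;
}

int main(void)
{
    const uint8_t ok[] = { 0x01, 0xFF };
    const uint8_t truncated[] = { 0x01 };
    struct reader a = { ok, sizeof(ok), 0 };
    struct reader b = { truncated, sizeof(truncated), 0 };
    bool v;
    printf("ok: %d value %d\n", read_boolean(&a, &v), v);
    printf("truncated: %d value %d\n", read_boolean(&b, &v), v);
    return 0;
}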
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void regulator_ena_gpio_free(struct regulator_dev *rdev) { struct regulator_enable_gpio *pin, *n; if (!rdev->ena_pin) return; /* Free the GPIO only in case of no use */ list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) { if (pin->gpiod == rdev->ena_pin->gpiod) { if (pin->request_count <= 1) { pin->request_count = 0; gpiod_put(pin->gpiod); list_del(&pin->list); kfree(pin); } else { pin->request_count--; } } } } Commit Message: regulator: core: Fix regualtor_ena_gpio_free not to access pin after freeing After freeing pin from regulator_ena_gpio_free, loop can access the pin. So this patch fixes not to access pin after freeing. Signed-off-by: Seung-Woo Kim <[email protected]> Signed-off-by: Mark Brown <[email protected]> CWE ID: CWE-416
static void regulator_ena_gpio_free(struct regulator_dev *rdev) { struct regulator_enable_gpio *pin, *n; if (!rdev->ena_pin) return; /* Free the GPIO only in case of no use */ list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) { if (pin->gpiod == rdev->ena_pin->gpiod) { if (pin->request_count <= 1) { pin->request_count = 0; gpiod_put(pin->gpiod); list_del(&pin->list); kfree(pin); rdev->ena_pin = NULL; return; } else { pin->request_count--; } } } }
168,894
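The regulator_ena_gpio_free() fix above makes the free path clear the cached rdev->ena_pin pointer and stop iterating, so nothing can touch the pin after kfree(). A small standalone sketch of that "NULL the reference when you free, then stop" pattern follows; the types and names are invented.

#include <stdio.h>
#include <stdlib.h>

/* Toy shared resource with a reference count. */
struct pin {
    int request_count;
};

struct device {
    struct pin *ena_pin;   /* cached pointer to the shared resource */
};

/* Drop the device's reference; when the last user goes away, free the pin,
 * clear the cached pointer so nothing can dereference it afterwards, and
 * return immediately so this call never touches the freed memory again. */
static void release_pin(struct device *dev)
{
    struct pin *p = dev->ena_pin;

    if (p == NULL)
        return;
    if (p->request_count <= 1) {
        free(p);
        dev->ena_pin = NULL;   /* no dangling pointer left behind */
        return;                /* and no further use of 'p' in this call */
    }
    p->request_count--;
}

int main(void)
{
    struct device dev;
    dev.ena_pin = malloc(sizeof(*dev.ena_pin));
    if (dev.ena_pin == NULL)
        return 1;
    dev.ena_pin->request_count = 1;
    release_pin(&dev);
    printf("ena_pin after release: %p\n", (void *)dev.ena_pin);
    return 0;
}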
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: parse_wbxml_attribute_list_defined (proto_tree *tree, tvbuff_t *tvb, guint32 offset, guint32 str_tbl, guint8 level, guint8 *codepage_attr, const wbxml_decoding *map) { guint32 tvb_len = tvb_reported_length (tvb); guint32 off = offset; guint32 len; guint str_len; guint32 ent; guint32 idx; guint8 peek; guint8 attr_save_known = 0; /* Will contain peek & 0x3F (attr identity) */ const char *attr_save_literal = NULL; /* Will contain the LITERAL attr identity */ DebugLog(("parse_wbxml_attr_defined (level = %u, offset = %u)\n", level, offset)); /* Parse attributes */ while (off < tvb_len) { peek = tvb_get_guint8 (tvb, off); DebugLog(("ATTR: (top of while) level = %3u, peek = 0x%02X, " "off = %u, tvb_len = %u\n", level, peek, off, tvb_len)); if ((peek & 0x3F) < 5) switch (peek) { /* Global tokens in state = ATTR */ case 0x00: /* SWITCH_PAGE */ *codepage_attr = tvb_get_guint8 (tvb, off+1); proto_tree_add_text (tree, tvb, off, 2, " | Attr | A -->%3d " "| SWITCH_PAGE (Attr code page) |", *codepage_attr); off += 2; break; case 0x01: /* END */ /* BEWARE * The Attribute END token means either ">" or "/>" * and as a consequence both must be treated separately. * This is done in the TAG state parser. */ off++; DebugLog(("ATTR: level = %u, Return: len = %u\n", level, off - offset)); return (off - offset); case 0x02: /* ENTITY */ ent = tvb_get_guintvar (tvb, off+1, &len); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Attr | A %3d " "| ENTITY " "| %s'&#%u;'", level, *codepage_attr, Indent (level), ent); off += 1+len; break; case 0x03: /* STR_I */ len = tvb_strsize (tvb, off+1); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Attr | A %3d " "| STR_I (Inline string) " "| %s\'%s\'", level, *codepage_attr, Indent (level), tvb_format_text (tvb, off+1, len-1)); off += 1+len; break; case 0x04: /* LITERAL */ /* ALWAYS means the start of a new attribute, * and may only contain the NAME of the attribute. 
*/ idx = tvb_get_guintvar (tvb, off+1, &len); str_len = tvb_strsize (tvb, str_tbl+idx); attr_save_known = 0; attr_save_literal = tvb_format_text (tvb, str_tbl+idx, str_len-1); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Attr | A %3d " "| LITERAL (Literal Attribute) " "| %s<%s />", level, *codepage_attr, Indent (level), attr_save_literal); off += 1+len; break; case 0x40: /* EXT_I_0 */ case 0x41: /* EXT_I_1 */ case 0x42: /* EXT_I_2 */ /* Extension tokens */ len = tvb_strsize (tvb, off+1); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Attr | A %3d " "| EXT_I_%1x (Extension Token) " "| %s(%s: \'%s\')", level, *codepage_attr, peek & 0x0f, Indent (level), map_token (map->global, 0, peek), tvb_format_text (tvb, off+1, len-1)); off += 1+len; break; /* 0x43 impossible in ATTR state */ /* 0x44 impossible in ATTR state */ case 0x80: /* EXT_T_0 */ case 0x81: /* EXT_T_1 */ case 0x82: /* EXT_T_2 */ /* Extension tokens */ idx = tvb_get_guintvar (tvb, off+1, &len); { char *s; if (map->ext_t[peek & 0x03]) s = (map->ext_t[peek & 0x03])(tvb, idx, str_tbl); else s = wmem_strdup_printf(wmem_packet_scope(), "EXT_T_%1x (%s)", peek & 0x03, map_token (map->global, 0, peek)); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Tag | T %3d " "| EXT_T_%1x (Extension Token) " "| %s%s)", level, *codepage_attr, peek & 0x0f, Indent (level), s); } off += 1+len; break; case 0x83: /* STR_T */ idx = tvb_get_guintvar (tvb, off+1, &len); str_len = tvb_strsize (tvb, str_tbl+idx); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Attr | A %3d " "| STR_T (Tableref string) " "| %s\'%s\'", level, *codepage_attr, Indent (level), tvb_format_text (tvb, str_tbl+idx, str_len-1)); off += 1+len; break; /* 0x84 impossible in ATTR state */ case 0xC0: /* EXT_0 */ case 0xC1: /* EXT_1 */ case 0xC2: /* EXT_2 */ /* Extension tokens */ proto_tree_add_text (tree, tvb, off, 1, " %3d | Attr | A %3d " "| EXT_%1x (Extension Token) " "| %s(%s)", level, *codepage_attr, peek & 0x0f, Indent (level), map_token (map->global, 0, peek)); off++; break; case 0xC3: /* OPAQUE - WBXML 1.1 and newer */ if (tvb_get_guint8 (tvb, 0)) { /* WBXML 1.x (x > 0) */ char *str; if (attr_save_known) { /* Knwon attribute */ if (map->opaque_binary_attr) { str = map->opaque_binary_attr(tvb, off + 1, attr_save_known, *codepage_attr, &len); } else { str = default_opaque_binary_attr(tvb, off + 1, attr_save_known, *codepage_attr, &len); } } else { /* lITERAL attribute */ if (map->opaque_literal_tag) { str = map->opaque_literal_attr(tvb, off + 1, attr_save_literal, *codepage_attr, &len); } else { str = default_opaque_literal_attr(tvb, off + 1, attr_save_literal, *codepage_attr, &len); } } proto_tree_add_text (tree, tvb, off, 1 + len, " %3d | Attr | A %3d " "| OPAQUE (Opaque data) " "| %s%s", level, *codepage_attr, Indent (level), str); off += 1 + len; } else { /* WBXML 1.0 - RESERVED_2 token (invalid) */ proto_tree_add_text (tree, tvb, off, 1, " %3d | Attr | A %3d " "| RESERVED_2 (Invalid Token!) " "| WBXML 1.0 parsing stops here.", level, *codepage_attr); /* Stop processing as it is impossible to parse now */ off = tvb_len; DebugLog(("ATTR: level = %u, Return: len = %u\n", level, off - offset)); return (off - offset); } break; /* 0xC4 impossible in ATTR state */ default: proto_tree_add_text (tree, tvb, off, 1, " %3d | Attr | A %3d " "| %-10s (Invalid Token!) 
" "| WBXML parsing stops here.", level, *codepage_attr, val_to_str_ext (peek, &vals_wbxml1x_global_tokens_ext, "(unknown 0x%x)")); /* Move to end of buffer */ off = tvb_len; break; } else { /* Known atribute token */ if (peek & 0x80) { /* attrValue */ proto_tree_add_text (tree, tvb, off, 1, " %3d | Attr | A %3d " "| Known attrValue 0x%02X " "| %s%s", level, *codepage_attr, peek & 0x7f, Indent (level), map_token (map->attrValue, *codepage_attr, peek)); off++; } else { /* attrStart */ attr_save_known = peek & 0x7f; proto_tree_add_text (tree, tvb, off, 1, " %3d | Attr | A %3d " "| Known attrStart 0x%02X " "| %s%s", level, *codepage_attr, attr_save_known, Indent (level), map_token (map->attrStart, *codepage_attr, peek)); off++; } } } /* End WHILE */ DebugLog(("ATTR: level = %u, Return: len = %u (end of function body)\n", level, off - offset)); return (off - offset); } Commit Message: WBXML: add a basic sanity check for offset overflow This is a naive approach allowing to detact that something went wrong, without the need to replace all proto_tree_add_text() calls as what was done in master-2.0 branch. Bug: 12408 Change-Id: Ia14905005e17ae322c2fc639ad5e491fa08b0108 Reviewed-on: https://code.wireshark.org/review/15310 Reviewed-by: Michael Mann <[email protected]> Reviewed-by: Pascal Quantin <[email protected]> CWE ID: CWE-119
parse_wbxml_attribute_list_defined (proto_tree *tree, tvbuff_t *tvb, guint32 offset, guint32 str_tbl, guint8 level, guint8 *codepage_attr, const wbxml_decoding *map) { guint32 tvb_len = tvb_reported_length (tvb); guint32 off = offset, last_off; guint32 len; guint str_len; guint32 ent; guint32 idx; guint8 peek; guint8 attr_save_known = 0; /* Will contain peek & 0x3F (attr identity) */ const char *attr_save_literal = NULL; /* Will contain the LITERAL attr identity */ DebugLog(("parse_wbxml_attr_defined (level = %u, offset = %u)\n", level, offset)); /* Parse attributes */ last_off = off; while (off < tvb_len) { peek = tvb_get_guint8 (tvb, off); DebugLog(("ATTR: (top of while) level = %3u, peek = 0x%02X, " "off = %u, tvb_len = %u\n", level, peek, off, tvb_len)); if ((peek & 0x3F) < 5) switch (peek) { /* Global tokens in state = ATTR */ case 0x00: /* SWITCH_PAGE */ *codepage_attr = tvb_get_guint8 (tvb, off+1); proto_tree_add_text (tree, tvb, off, 2, " | Attr | A -->%3d " "| SWITCH_PAGE (Attr code page) |", *codepage_attr); off += 2; break; case 0x01: /* END */ /* BEWARE * The Attribute END token means either ">" or "/>" * and as a consequence both must be treated separately. * This is done in the TAG state parser. */ off++; DebugLog(("ATTR: level = %u, Return: len = %u\n", level, off - offset)); return (off - offset); case 0x02: /* ENTITY */ ent = tvb_get_guintvar (tvb, off+1, &len); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Attr | A %3d " "| ENTITY " "| %s'&#%u;'", level, *codepage_attr, Indent (level), ent); off += 1+len; break; case 0x03: /* STR_I */ len = tvb_strsize (tvb, off+1); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Attr | A %3d " "| STR_I (Inline string) " "| %s\'%s\'", level, *codepage_attr, Indent (level), tvb_format_text (tvb, off+1, len-1)); off += 1+len; break; case 0x04: /* LITERAL */ /* ALWAYS means the start of a new attribute, * and may only contain the NAME of the attribute. 
*/ idx = tvb_get_guintvar (tvb, off+1, &len); str_len = tvb_strsize (tvb, str_tbl+idx); attr_save_known = 0; attr_save_literal = tvb_format_text (tvb, str_tbl+idx, str_len-1); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Attr | A %3d " "| LITERAL (Literal Attribute) " "| %s<%s />", level, *codepage_attr, Indent (level), attr_save_literal); off += 1+len; break; case 0x40: /* EXT_I_0 */ case 0x41: /* EXT_I_1 */ case 0x42: /* EXT_I_2 */ /* Extension tokens */ len = tvb_strsize (tvb, off+1); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Attr | A %3d " "| EXT_I_%1x (Extension Token) " "| %s(%s: \'%s\')", level, *codepage_attr, peek & 0x0f, Indent (level), map_token (map->global, 0, peek), tvb_format_text (tvb, off+1, len-1)); off += 1+len; break; /* 0x43 impossible in ATTR state */ /* 0x44 impossible in ATTR state */ case 0x80: /* EXT_T_0 */ case 0x81: /* EXT_T_1 */ case 0x82: /* EXT_T_2 */ /* Extension tokens */ idx = tvb_get_guintvar (tvb, off+1, &len); { char *s; if (map->ext_t[peek & 0x03]) s = (map->ext_t[peek & 0x03])(tvb, idx, str_tbl); else s = wmem_strdup_printf(wmem_packet_scope(), "EXT_T_%1x (%s)", peek & 0x03, map_token (map->global, 0, peek)); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Tag | T %3d " "| EXT_T_%1x (Extension Token) " "| %s%s)", level, *codepage_attr, peek & 0x0f, Indent (level), s); } off += 1+len; break; case 0x83: /* STR_T */ idx = tvb_get_guintvar (tvb, off+1, &len); str_len = tvb_strsize (tvb, str_tbl+idx); proto_tree_add_text (tree, tvb, off, 1+len, " %3d | Attr | A %3d " "| STR_T (Tableref string) " "| %s\'%s\'", level, *codepage_attr, Indent (level), tvb_format_text (tvb, str_tbl+idx, str_len-1)); off += 1+len; break; /* 0x84 impossible in ATTR state */ case 0xC0: /* EXT_0 */ case 0xC1: /* EXT_1 */ case 0xC2: /* EXT_2 */ /* Extension tokens */ proto_tree_add_text (tree, tvb, off, 1, " %3d | Attr | A %3d " "| EXT_%1x (Extension Token) " "| %s(%s)", level, *codepage_attr, peek & 0x0f, Indent (level), map_token (map->global, 0, peek)); off++; break; case 0xC3: /* OPAQUE - WBXML 1.1 and newer */ if (tvb_get_guint8 (tvb, 0)) { /* WBXML 1.x (x > 0) */ char *str; if (attr_save_known) { /* Knwon attribute */ if (map->opaque_binary_attr) { str = map->opaque_binary_attr(tvb, off + 1, attr_save_known, *codepage_attr, &len); } else { str = default_opaque_binary_attr(tvb, off + 1, attr_save_known, *codepage_attr, &len); } } else { /* lITERAL attribute */ if (map->opaque_literal_tag) { str = map->opaque_literal_attr(tvb, off + 1, attr_save_literal, *codepage_attr, &len); } else { str = default_opaque_literal_attr(tvb, off + 1, attr_save_literal, *codepage_attr, &len); } } proto_tree_add_text (tree, tvb, off, 1 + len, " %3d | Attr | A %3d " "| OPAQUE (Opaque data) " "| %s%s", level, *codepage_attr, Indent (level), str); off += 1 + len; } else { /* WBXML 1.0 - RESERVED_2 token (invalid) */ proto_tree_add_text (tree, tvb, off, 1, " %3d | Attr | A %3d " "| RESERVED_2 (Invalid Token!) " "| WBXML 1.0 parsing stops here.", level, *codepage_attr); /* Stop processing as it is impossible to parse now */ off = tvb_len; DebugLog(("ATTR: level = %u, Return: len = %u\n", level, off - offset)); return (off - offset); } break; /* 0xC4 impossible in ATTR state */ default: proto_tree_add_text (tree, tvb, off, 1, " %3d | Attr | A %3d " "| %-10s (Invalid Token!) 
" "| WBXML parsing stops here.", level, *codepage_attr, val_to_str_ext (peek, &vals_wbxml1x_global_tokens_ext, "(unknown 0x%x)")); /* Move to end of buffer */ off = tvb_len; break; } else { /* Known atribute token */ if (peek & 0x80) { /* attrValue */ proto_tree_add_text (tree, tvb, off, 1, " %3d | Attr | A %3d " "| Known attrValue 0x%02X " "| %s%s", level, *codepage_attr, peek & 0x7f, Indent (level), map_token (map->attrValue, *codepage_attr, peek)); off++; } else { /* attrStart */ attr_save_known = peek & 0x7f; proto_tree_add_text (tree, tvb, off, 1, " %3d | Attr | A %3d " "| Known attrStart 0x%02X " "| %s%s", level, *codepage_attr, attr_save_known, Indent (level), map_token (map->attrStart, *codepage_attr, peek)); off++; } } if (off < last_off) { THROW(ReportedBoundsError); } last_off = off; } /* End WHILE */ DebugLog(("ATTR: level = %u, Return: len = %u (end of function body)\n", level, off - offset)); return (off - offset); }
167,140
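The parse_wbxml_attribute_list_defined() fix above records last_off and aborts if the offset ever moves backwards, which is how it detects a wrapped cursor caused by attacker-controlled lengths. The sketch below applies the same monotonic-progress guard (here with <=, so a stuck offset is also caught) to a made-up item format; none of the names come from Wireshark.

#include <stdint.h>
#include <stdio.h>

/* Read a little-endian 32-bit length field. */
static uint32_t read_u32le(const uint8_t *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Walk variable-length items in a buffer.  The input controls each item
 * length, so the 32-bit offset can wrap around; any step that fails to move
 * the cursor strictly forward is treated as a fatal parse error. */
static int walk_items(const uint8_t *buf, uint32_t len)
{
    uint32_t off = 0, last_off = 0;

    while (off < len && len - off >= 4) {
        uint32_t item_len = read_u32le(buf + off);
        off += 4 + item_len;            /* may wrap around 2^32 */
        if (off <= last_off)
            return -1;                  /* wrapped or stuck: malformed input */
        last_off = off;
    }
    return 0;
}

int main(void)
{
    const uint8_t ok[]   = { 0x01, 0x00, 0x00, 0x00, 'x' };
    const uint8_t evil[] = { 0xFC, 0xFF, 0xFF, 0xFF };  /* length 0xFFFFFFFC */
    printf("ok:   %d\n", walk_items(ok, (uint32_t)sizeof(ok)));
    printf("evil: %d\n", walk_items(evil, (uint32_t)sizeof(evil)));
    return 0;
}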
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int exif_process_IFD_in_JPEG(image_info_type *ImageInfo, char *dir_start, char *offset_base, size_t IFDlength, size_t displacement, int section_index TSRMLS_DC) { int de; int NumDirEntries; int NextDirOffset; #ifdef EXIF_DEBUG exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process %s (x%04X(=%d))", exif_get_sectionname(section_index), IFDlength, IFDlength); #endif ImageInfo->sections_found |= FOUND_IFD0; NumDirEntries = php_ifd_get16u(dir_start, ImageInfo->motorola_intel); if ((dir_start+2+NumDirEntries*12) > (offset_base+IFDlength)) { if (!exif_process_IFD_TAG(ImageInfo, dir_start + 2 + 12 * de, offset_base, IFDlength, displacement, section_index, 1, exif_get_tag_table(section_index) TSRMLS_CC)) { return FALSE; } } /* * Ignore IFD2 if it purportedly exists */ if (section_index == SECTION_THUMBNAIL) { return TRUE; } /* * Hack to make it process IDF1 I hope * There are 2 IDFs, the second one holds the keys (0x0201 and 0x0202) to the thumbnail */ NextDirOffset = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel); if (NextDirOffset) { * Hack to make it process IDF1 I hope * There are 2 IDFs, the second one holds the keys (0x0201 and 0x0202) to the thumbnail */ NextDirOffset = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel); if (NextDirOffset) { /* the next line seems false but here IFDlength means length of all IFDs */ #ifdef EXIF_DEBUG exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail size: 0x%04X", ImageInfo->Thumbnail.size); #endif if (ImageInfo->Thumbnail.filetype != IMAGE_FILETYPE_UNKNOWN && ImageInfo->Thumbnail.size && ImageInfo->Thumbnail.offset && ImageInfo->read_thumbnail ) { exif_thumbnail_extract(ImageInfo, offset_base, IFDlength TSRMLS_CC); } return TRUE; } else { return FALSE; } } return TRUE; } Commit Message: CWE ID: CWE-119
static int exif_process_IFD_in_JPEG(image_info_type *ImageInfo, char *dir_start, char *offset_base, size_t IFDlength, size_t displacement, int section_index TSRMLS_DC) { int de; int NumDirEntries; int NextDirOffset; #ifdef EXIF_DEBUG exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Process %s (x%04X(=%d))", exif_get_sectionname(section_index), IFDlength, IFDlength); #endif ImageInfo->sections_found |= FOUND_IFD0; if ((dir_start + 2) >= (offset_base+IFDlength)) { exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size"); return FALSE; } NumDirEntries = php_ifd_get16u(dir_start, ImageInfo->motorola_intel); if ((dir_start+2+NumDirEntries*12) > (offset_base+IFDlength)) { if (!exif_process_IFD_TAG(ImageInfo, dir_start + 2 + 12 * de, offset_base, IFDlength, displacement, section_index, 1, exif_get_tag_table(section_index) TSRMLS_CC)) { return FALSE; } } /* * Ignore IFD2 if it purportedly exists */ if (section_index == SECTION_THUMBNAIL) { return TRUE; } /* * Hack to make it process IDF1 I hope * There are 2 IDFs, the second one holds the keys (0x0201 and 0x0202) to the thumbnail */ NextDirOffset = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel); if (NextDirOffset) { * Hack to make it process IDF1 I hope * There are 2 IDFs, the second one holds the keys (0x0201 and 0x0202) to the thumbnail */ if ((dir_start+2+12*de + 4) >= (offset_base+IFDlength)) { exif_error_docref("exif_read_data#error_ifd" EXIFERR_CC, ImageInfo, E_WARNING, "Illegal IFD size"); return FALSE; } NextDirOffset = php_ifd_get32u(dir_start+2+12*de, ImageInfo->motorola_intel); if (NextDirOffset) { /* the next line seems false but here IFDlength means length of all IFDs */ #ifdef EXIF_DEBUG exif_error_docref(NULL EXIFERR_CC, ImageInfo, E_NOTICE, "Thumbnail size: 0x%04X", ImageInfo->Thumbnail.size); #endif if (ImageInfo->Thumbnail.filetype != IMAGE_FILETYPE_UNKNOWN && ImageInfo->Thumbnail.size && ImageInfo->Thumbnail.offset && ImageInfo->read_thumbnail ) { exif_thumbnail_extract(ImageInfo, offset_base, IFDlength TSRMLS_CC); } return TRUE; } else { return FALSE; } } return TRUE; }
165,033
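The exif_process_IFD_in_JPEG() fix above adds explicit "is the value I am about to read still inside the buffer" checks before the 16-bit and 32-bit reads. Below is a standalone sketch of that bounds-check-before-read pattern on a toy IFD-like header; the helper names and layout are invented and much simpler than the real EXIF structures.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Read a big-endian u16 only if both bytes lie inside [buf, buf+len). */
static int read_u16be(const uint8_t *buf, size_t len, size_t off, uint16_t *out)
{
    if (len < 2 || off > len - 2)
        return -1;                      /* would read past the buffer */
    *out = (uint16_t)((buf[off] << 8) | buf[off + 1]);
    return 0;
}

/* Parse a toy IFD header: an entry count followed by 12-byte entries.
 * Every multi-byte read is bounds-checked first, and a declared entry count
 * that exceeds the available data is rejected up front. */
static int parse_ifd(const uint8_t *buf, size_t len)
{
    uint16_t entries;

    if (read_u16be(buf, len, 0, &entries) != 0)
        return -1;
    if ((size_t)entries * 12 > len - 2)
        return -1;                      /* declared entries exceed data */
    /* ... per-entry parsing would go here ... */
    return (int)entries;
}

int main(void)
{
    const uint8_t short_buf[] = { 0x00 };        /* too short for the count */
    const uint8_t lying_buf[] = { 0xFF, 0xFF };  /* claims 65535 entries */
    printf("short: %d, lying: %d\n",
           parse_ifd(short_buf, sizeof(short_buf)),
           parse_ifd(lying_buf, sizeof(lying_buf)));
    return 0;
}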
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static bool getCoverageFormat12(vector<uint32_t>& coverage, const uint8_t* data, size_t size) { const size_t kNGroupsOffset = 12; const size_t kFirstGroupOffset = 16; const size_t kGroupSize = 12; const size_t kStartCharCodeOffset = 0; const size_t kEndCharCodeOffset = 4; const size_t kMaxNGroups = 0xfffffff0 / kGroupSize; // protection against overflow if (kFirstGroupOffset > size) { return false; } uint32_t nGroups = readU32(data, kNGroupsOffset); if (nGroups >= kMaxNGroups || kFirstGroupOffset + nGroups * kGroupSize > size) { return false; } for (uint32_t i = 0; i < nGroups; i++) { uint32_t groupOffset = kFirstGroupOffset + i * kGroupSize; uint32_t start = readU32(data, groupOffset + kStartCharCodeOffset); uint32_t end = readU32(data, groupOffset + kEndCharCodeOffset); addRange(coverage, start, end + 1); // file is inclusive, vector is exclusive } return true; } Commit Message: Reject fonts with invalid ranges in cmap A corrupt or malicious font may have a negative size in its cmap range, which in turn could lead to memory corruption. This patch detects the case and rejects the font, and also includes an assertion in the sparse bit set implementation if we missed any such case. External issue: https://code.google.com/p/android/issues/detail?id=192618 Bug: 26413177 Change-Id: Icc0c80e4ef389abba0964495b89aa0fae3e9f4b2 CWE ID: CWE-20
static bool getCoverageFormat12(vector<uint32_t>& coverage, const uint8_t* data, size_t size) { const size_t kNGroupsOffset = 12; const size_t kFirstGroupOffset = 16; const size_t kGroupSize = 12; const size_t kStartCharCodeOffset = 0; const size_t kEndCharCodeOffset = 4; const size_t kMaxNGroups = 0xfffffff0 / kGroupSize; // protection against overflow if (kFirstGroupOffset > size) { return false; } uint32_t nGroups = readU32(data, kNGroupsOffset); if (nGroups >= kMaxNGroups || kFirstGroupOffset + nGroups * kGroupSize > size) { return false; } for (uint32_t i = 0; i < nGroups; i++) { uint32_t groupOffset = kFirstGroupOffset + i * kGroupSize; uint32_t start = readU32(data, groupOffset + kStartCharCodeOffset); uint32_t end = readU32(data, groupOffset + kEndCharCodeOffset); if (end < start) { // invalid group range: size must be positive return false; } addRange(coverage, start, end + 1); // file is inclusive, vector is exclusive } return true; }
174,234
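The getCoverageFormat12() fix above rejects any cmap group whose end code point precedes its start, so a negative-sized range can never feed later arithmetic. The sketch below shows the same end-before-start rejection on an invented range type; it is not the Minikin code.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct range {
    uint32_t start;
    uint32_t end;   /* inclusive, as in the cmap format 12 groups above */
};

/* Accumulate how many code points a list of ranges covers, rejecting any
 * group whose end precedes its start instead of letting the "negative size"
 * poison the arithmetic that follows. */
static bool total_coverage(const struct range *r, size_t n, uint64_t *total)
{
    uint64_t sum = 0;
    for (size_t i = 0; i < n; i++) {
        if (r[i].end < r[i].start)
            return false;                    /* invalid group: reject input */
        sum += (uint64_t)r[i].end - r[i].start + 1;
    }
    *total = sum;
    return true;
}

int main(void)
{
    struct range good[] = { { 0x41, 0x5A }, { 0x61, 0x7A } };
    struct range bad[]  = { { 0x100, 0x80 } };
    uint64_t t = 0;
    printf("good: %d (%llu code points)\n",
           total_coverage(good, 2, &t), (unsigned long long)t);
    printf("bad:  %d\n", total_coverage(bad, 1, &t));
    return 0;
}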
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void Unregister(const GURL& url) { EXPECT_TRUE( content::BrowserThread::CurrentlyOn(content::BrowserThread::IO)); net::URLRequestFilter::GetInstance()->RemoveUrlHandler(url); } Commit Message: Fix ChromeResourceDispatcherHostDelegateMirrorBrowserTest.MirrorRequestHeader with network service. The functionality worked, as part of converting DICE, however the test code didn't work since it depended on accessing the net objects directly. Switch the tests to use the EmbeddedTestServer, to better match production, which removes the dependency on net/. Also: -make GetFilePathWithReplacements replace strings in the mock headers if they're present -add a global to google_util to ignore ports; that way other tests can be converted without having to modify each callsite to google_util Bug: 881976 Change-Id: Ic52023495c1c98c1248025c11cdf37f433fef058 Reviewed-on: https://chromium-review.googlesource.com/c/1328142 Commit-Queue: John Abd-El-Malek <[email protected]> Reviewed-by: Ramin Halavati <[email protected]> Reviewed-by: Maks Orlovich <[email protected]> Reviewed-by: Peter Kasting <[email protected]> Cr-Commit-Position: refs/heads/master@{#607652} CWE ID:
static void Unregister(const GURL& url) { void WillStartRequest(network::ResourceRequest* request, bool* defer) override { request->headers.SetHeader(signin::kChromeConnectedHeader, "User Data"); }
172,582
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void VRDisplay::BeginPresent() { Document* doc = this->GetDocument(); if (capabilities_->hasExternalDisplay()) { ForceExitPresent(); DOMException* exception = DOMException::Create( kInvalidStateError, "VR Presentation not implemented for this VRDisplay."); while (!pending_present_resolvers_.IsEmpty()) { ScriptPromiseResolver* resolver = pending_present_resolvers_.TakeFirst(); resolver->Reject(exception); } ReportPresentationResult( PresentationResult::kPresentationNotSupportedByDisplay); return; } else { if (layer_.source().isHTMLCanvasElement()) { } else { DCHECK(layer_.source().isOffscreenCanvas()); ForceExitPresent(); DOMException* exception = DOMException::Create( kInvalidStateError, "OffscreenCanvas presentation not implemented."); while (!pending_present_resolvers_.IsEmpty()) { ScriptPromiseResolver* resolver = pending_present_resolvers_.TakeFirst(); resolver->Reject(exception); } ReportPresentationResult( PresentationResult::kPresentationNotSupportedByDisplay); return; } } if (doc) { Platform::Current()->RecordRapporURL("VR.WebVR.PresentSuccess", WebURL(doc->Url())); } is_presenting_ = true; ReportPresentationResult(PresentationResult::kSuccess); UpdateLayerBounds(); while (!pending_present_resolvers_.IsEmpty()) { ScriptPromiseResolver* resolver = pending_present_resolvers_.TakeFirst(); resolver->Resolve(); } OnPresentChange(); } Commit Message: WebVR: fix initial vsync Applications sometimes use window.rAF while not presenting, then switch to vrDisplay.rAF after presentation starts. Depending on the animation loop's timing, this can cause a race condition where presentation has been started but there's no vrDisplay.rAF pending yet. Ensure there's at least vsync being processed after presentation starts so that a queued window.rAF can run and schedule a vrDisplay.rAF. BUG=711789 Review-Url: https://codereview.chromium.org/2848483003 Cr-Commit-Position: refs/heads/master@{#468167} CWE ID:
void VRDisplay::BeginPresent() { Document* doc = this->GetDocument(); if (capabilities_->hasExternalDisplay()) { ForceExitPresent(); DOMException* exception = DOMException::Create( kInvalidStateError, "VR Presentation not implemented for this VRDisplay."); while (!pending_present_resolvers_.IsEmpty()) { ScriptPromiseResolver* resolver = pending_present_resolvers_.TakeFirst(); resolver->Reject(exception); } ReportPresentationResult( PresentationResult::kPresentationNotSupportedByDisplay); return; } else { if (layer_.source().isHTMLCanvasElement()) { } else { DCHECK(layer_.source().isOffscreenCanvas()); ForceExitPresent(); DOMException* exception = DOMException::Create( kInvalidStateError, "OffscreenCanvas presentation not implemented."); while (!pending_present_resolvers_.IsEmpty()) { ScriptPromiseResolver* resolver = pending_present_resolvers_.TakeFirst(); resolver->Reject(exception); } ReportPresentationResult( PresentationResult::kPresentationNotSupportedByDisplay); return; } } if (doc) { Platform::Current()->RecordRapporURL("VR.WebVR.PresentSuccess", WebURL(doc->Url())); } is_presenting_ = true; ReportPresentationResult(PresentationResult::kSuccess); UpdateLayerBounds(); while (!pending_present_resolvers_.IsEmpty()) { ScriptPromiseResolver* resolver = pending_present_resolvers_.TakeFirst(); resolver->Resolve(); } OnPresentChange(); // For GVR, we shut down normal vsync processing during VR presentation. // Run window.rAF once manually so that applications get a chance to // schedule a VRDisplay.rAF in case they do so only while presenting. if (!pending_vrdisplay_raf_ && !capabilities_->hasExternalDisplay()) { double timestamp = WTF::MonotonicallyIncreasingTime(); Platform::Current()->CurrentThread()->GetWebTaskRunner()->PostTask( BLINK_FROM_HERE, WTF::Bind(&VRDisplay::ProcessScheduledWindowAnimations, WrapWeakPersistent(this), timestamp)); } }
171,991
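The VRDisplay::BeginPresent() fix above closes a race by posting one manual window.rAF after presentation starts, so an application that was still animating on the window loop gets a chance to re-register on vrDisplay.rAF. The toy, single-threaded sketch below models that "pump the old queue once after the mode switch" idea; the queues and names are invented, and the real fix posts a task on the renderer thread instead.

#include <stdio.h>

typedef void (*frame_cb)(void);

/* One-slot callback queues standing in for window.rAF and vrDisplay.rAF. */
static frame_cb window_queue;
static frame_cb vr_queue;
static int presenting;

/* The application's animation callback: before presentation it re-arms on
 * the window queue, afterwards it re-arms on the VR queue. */
static void app_frame(void)
{
    printf("app frame (presenting=%d)\n", presenting);
    if (presenting)
        vr_queue = app_frame;
    else
        window_queue = app_frame;
}

/* Entering presentation stops the normal window vsync.  Pumping the window
 * queue one last time here gives a callback that is already parked there a
 * chance to re-register on the VR queue. */
static void begin_present(void)
{
    presenting = 1;
    if (window_queue) {            /* one-shot manual pump */
        frame_cb cb = window_queue;
        window_queue = NULL;
        cb();
    }
}

int main(void)
{
    window_queue = app_frame;      /* the app is animating before presenting */
    begin_present();
    printf("vr_queue armed: %s\n", vr_queue ? "yes" : "no");
    return 0;
}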
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void PrintRenderFrameHelper::PrintHeaderAndFooter( blink::WebCanvas* canvas, int page_number, int total_pages, const blink::WebLocalFrame& source_frame, float webkit_scale_factor, const PageSizeMargins& page_layout, const PrintMsg_Print_Params& params) { cc::PaintCanvasAutoRestore auto_restore(canvas, true); canvas->scale(1 / webkit_scale_factor, 1 / webkit_scale_factor); blink::WebSize page_size(page_layout.margin_left + page_layout.margin_right + page_layout.content_width, page_layout.margin_top + page_layout.margin_bottom + page_layout.content_height); blink::WebView* web_view = blink::WebView::Create( nullptr, blink::mojom::PageVisibilityState::kVisible); web_view->GetSettings()->SetJavaScriptEnabled(true); class HeaderAndFooterClient final : public blink::WebFrameClient { public: void BindToFrame(blink::WebLocalFrame* frame) override { frame_ = frame; } void FrameDetached(DetachType detach_type) override { frame_->FrameWidget()->Close(); frame_->Close(); frame_ = nullptr; } private: blink::WebLocalFrame* frame_; }; HeaderAndFooterClient frame_client; blink::WebLocalFrame* frame = blink::WebLocalFrame::CreateMainFrame( web_view, &frame_client, nullptr, nullptr); blink::WebWidgetClient web_widget_client; blink::WebFrameWidget::Create(&web_widget_client, frame); base::Value html(base::UTF8ToUTF16( ui::ResourceBundle::GetSharedInstance().GetRawDataResource( IDR_PRINT_PREVIEW_PAGE))); ExecuteScript(frame, kPageLoadScriptFormat, html); auto options = base::MakeUnique<base::DictionaryValue>(); options->SetDouble(kSettingHeaderFooterDate, base::Time::Now().ToJsTime()); options->SetDouble("width", page_size.width); options->SetDouble("height", page_size.height); options->SetDouble("topMargin", page_layout.margin_top); options->SetDouble("bottomMargin", page_layout.margin_bottom); options->SetInteger("pageNumber", page_number); options->SetInteger("totalPages", total_pages); options->SetString("url", params.url); base::string16 title = source_frame.GetDocument().Title().Utf16(); options->SetString("title", title.empty() ? params.title : title); ExecuteScript(frame, kPageSetupScriptFormat, *options); blink::WebPrintParams webkit_params(page_size); webkit_params.printer_dpi = GetDPI(&params); frame->PrintBegin(webkit_params); frame->PrintPage(0, canvas); frame->PrintEnd(); web_view->Close(); } Commit Message: DevTools: allow styling the page number element when printing over the protocol. Bug: none Change-Id: I13e6afbd86a7c6bcdedbf0645183194b9de7cfb4 Reviewed-on: https://chromium-review.googlesource.com/809759 Commit-Queue: Pavel Feldman <[email protected]> Reviewed-by: Lei Zhang <[email protected]> Reviewed-by: Tom Sepez <[email protected]> Reviewed-by: Jianzhou Feng <[email protected]> Cr-Commit-Position: refs/heads/master@{#523966} CWE ID: CWE-20
void PrintRenderFrameHelper::PrintHeaderAndFooter( blink::WebCanvas* canvas, int page_number, int total_pages, const blink::WebLocalFrame& source_frame, float webkit_scale_factor, const PageSizeMargins& page_layout, const PrintMsg_Print_Params& params) { cc::PaintCanvasAutoRestore auto_restore(canvas, true); canvas->scale(1 / webkit_scale_factor, 1 / webkit_scale_factor); blink::WebSize page_size(page_layout.margin_left + page_layout.margin_right + page_layout.content_width, page_layout.margin_top + page_layout.margin_bottom + page_layout.content_height); blink::WebView* web_view = blink::WebView::Create( nullptr, blink::mojom::PageVisibilityState::kVisible); web_view->GetSettings()->SetJavaScriptEnabled(true); class HeaderAndFooterClient final : public blink::WebFrameClient { public: void BindToFrame(blink::WebLocalFrame* frame) override { frame_ = frame; } void FrameDetached(DetachType detach_type) override { frame_->FrameWidget()->Close(); frame_->Close(); frame_ = nullptr; } private: blink::WebLocalFrame* frame_; }; HeaderAndFooterClient frame_client; blink::WebLocalFrame* frame = blink::WebLocalFrame::CreateMainFrame( web_view, &frame_client, nullptr, nullptr); blink::WebWidgetClient web_widget_client; blink::WebFrameWidget::Create(&web_widget_client, frame); base::Value html(base::UTF8ToUTF16( ui::ResourceBundle::GetSharedInstance().GetRawDataResource( IDR_PRINT_PREVIEW_PAGE))); ExecuteScript(frame, kPageLoadScriptFormat, html); auto options = base::MakeUnique<base::DictionaryValue>(); options->SetDouble(kSettingHeaderFooterDate, base::Time::Now().ToJsTime()); options->SetDouble("width", page_size.width); options->SetDouble("height", page_size.height); options->SetDouble("topMargin", page_layout.margin_top); options->SetDouble("bottomMargin", page_layout.margin_bottom); options->SetInteger("pageNumber", page_number); options->SetInteger("totalPages", total_pages); options->SetString("url", params.url); base::string16 title = source_frame.GetDocument().Title().Utf16(); options->SetString("title", title.empty() ? params.title : title); options->SetString("headerTemplate", params.header_template); options->SetString("footerTemplate", params.footer_template); ExecuteScript(frame, kPageSetupScriptFormat, *options); blink::WebPrintParams webkit_params(page_size); webkit_params.printer_dpi = GetDPI(&params); frame->PrintBegin(webkit_params); frame->PrintPage(0, canvas); frame->PrintEnd(); web_view->Close(); }
172,899
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static MagickBooleanType WriteImageChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const MagickBooleanType separate,ExceptionInfo *exception) { size_t channels, packet_size; unsigned char *compact_pixels; /* Write uncompressed pixels as separate planes. */ channels=1; packet_size=next_image->depth > 8UL ? 2UL : 1UL; compact_pixels=(unsigned char *) NULL; if (next_image->compression == RLECompression) { compact_pixels=(unsigned char *) AcquireQuantumMemory(2*channels* next_image->columns,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } if (IsImageGray(next_image) != MagickFalse) { if (next_image->compression == RLECompression) { /* Packbits compression. */ (void) WriteBlobMSBShort(image,1); WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,GrayQuantum,exception); if (next_image->alpha_trait != UndefinedPixelTrait) WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,AlphaQuantum,exception); } WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, GrayQuantum,MagickTrue,exception); if (next_image->alpha_trait != UndefinedPixelTrait) WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, AlphaQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,0,1); } else if (next_image->storage_class == PseudoClass) { if (next_image->compression == RLECompression) { /* Packbits compression. */ (void) WriteBlobMSBShort(image,1); WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,IndexQuantum,exception); if (next_image->alpha_trait != UndefinedPixelTrait) WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,AlphaQuantum,exception); } WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, IndexQuantum,MagickTrue,exception); if (next_image->alpha_trait != UndefinedPixelTrait) WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, AlphaQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,0,1); } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (next_image->compression == RLECompression) { /* Packbits compression. 
*/ (void) WriteBlobMSBShort(image,1); WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,RedQuantum,exception); WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,GreenQuantum,exception); WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,BlueQuantum,exception); if (next_image->colorspace == CMYKColorspace) WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,BlackQuantum,exception); if (next_image->alpha_trait != UndefinedPixelTrait) WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,AlphaQuantum,exception); } (void) SetImageProgress(image,SaveImagesTag,0,6); WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, RedQuantum,MagickTrue,exception); (void) SetImageProgress(image,SaveImagesTag,1,6); WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, GreenQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,2,6); WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, BlueQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,3,6); if (next_image->colorspace == CMYKColorspace) WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, BlackQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,4,6); if (next_image->alpha_trait != UndefinedPixelTrait) WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, AlphaQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,5,6); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); } if (next_image->compression == RLECompression) compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); return(MagickTrue); } Commit Message: Fixed overflow. CWE ID: CWE-125
static MagickBooleanType WriteImageChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const MagickBooleanType separate,ExceptionInfo *exception) { size_t channels, packet_size; unsigned char *compact_pixels; /* Write uncompressed pixels as separate planes. */ channels=1; packet_size=next_image->depth > 8UL ? 2UL : 1UL; compact_pixels=(unsigned char *) NULL; if (next_image->compression == RLECompression) { compact_pixels=(unsigned char *) AcquireQuantumMemory((2*channels* next_image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } if (IsImageGray(next_image) != MagickFalse) { if (next_image->compression == RLECompression) { /* Packbits compression. */ (void) WriteBlobMSBShort(image,1); WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,GrayQuantum,exception); if (next_image->alpha_trait != UndefinedPixelTrait) WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,AlphaQuantum,exception); } WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, GrayQuantum,MagickTrue,exception); if (next_image->alpha_trait != UndefinedPixelTrait) WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, AlphaQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,0,1); } else if (next_image->storage_class == PseudoClass) { if (next_image->compression == RLECompression) { /* Packbits compression. */ (void) WriteBlobMSBShort(image,1); WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,IndexQuantum,exception); if (next_image->alpha_trait != UndefinedPixelTrait) WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,AlphaQuantum,exception); } WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, IndexQuantum,MagickTrue,exception); if (next_image->alpha_trait != UndefinedPixelTrait) WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, AlphaQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,0,1); } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (next_image->compression == RLECompression) { /* Packbits compression. 
*/ (void) WriteBlobMSBShort(image,1); WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,RedQuantum,exception); WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,GreenQuantum,exception); WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,BlueQuantum,exception); if (next_image->colorspace == CMYKColorspace) WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,BlackQuantum,exception); if (next_image->alpha_trait != UndefinedPixelTrait) WritePackbitsLength(psd_info,image_info,image,next_image, compact_pixels,AlphaQuantum,exception); } (void) SetImageProgress(image,SaveImagesTag,0,6); WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, RedQuantum,MagickTrue,exception); (void) SetImageProgress(image,SaveImagesTag,1,6); WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, GreenQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,2,6); WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, BlueQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,3,6); if (next_image->colorspace == CMYKColorspace) WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, BlackQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,4,6); if (next_image->alpha_trait != UndefinedPixelTrait) WriteOneChannel(psd_info,image_info,image,next_image,compact_pixels, AlphaQuantum,separate,exception); (void) SetImageProgress(image,SaveImagesTag,5,6); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); } if (next_image->compression == RLECompression) compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); return(MagickTrue); }
170,118
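The WriteImageChannels() fix above enlarges the RLE scratch buffer to (2 * channels * columns) + 1 elements so the worst-case compressed output cannot run past the allocation. The sketch below illustrates the general size-for-the-worst-case idea with a naive run-length encoder whose worst case is exactly two output bytes per input byte; it does not reproduce ImageMagick's exact PackBits bound.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Naive run-length encoder: each run becomes a (count, value) pair, so the
 * worst case (no repeats at all) is exactly 2 bytes of output per input
 * byte.  The scratch buffer must be sized for that worst case, not for the
 * uncompressed input size. */
static size_t rle_worst_case(size_t n)
{
    return 2 * n;
}

static size_t rle_encode(const uint8_t *in, size_t n, uint8_t *out)
{
    size_t i = 0, o = 0;
    while (i < n) {
        uint8_t value = in[i];
        uint8_t count = 1;
        while (i + count < n && in[i + count] == value && count < 255)
            count++;
        out[o++] = count;
        out[o++] = value;
        i += count;
    }
    return o;
}

int main(void)
{
    const uint8_t pixels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };  /* incompressible */
    uint8_t *scratch = malloc(rle_worst_case(sizeof(pixels)));
    if (scratch == NULL)
        return 1;
    size_t used = rle_encode(pixels, sizeof(pixels), scratch);
    printf("in=%zu out=%zu (worst case %zu)\n",
           sizeof(pixels), used, rle_worst_case(sizeof(pixels)));
    free(scratch);
    return 0;
}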
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void WallpaperManager::DoSetDefaultWallpaper( const AccountId& account_id, MovableOnDestroyCallbackHolder on_finish) { if (user_manager::UserManager::Get()->IsLoggedInAsKioskApp()) return; wallpaper_cache_.erase(account_id); WallpaperResolution resolution = GetAppropriateResolution(); const bool use_small = (resolution == WALLPAPER_RESOLUTION_SMALL); const base::FilePath* file = NULL; const user_manager::User* user = user_manager::UserManager::Get()->FindUser(account_id); if (user_manager::UserManager::Get()->IsLoggedInAsGuest()) { file = use_small ? &guest_small_wallpaper_file_ : &guest_large_wallpaper_file_; } else if (user && user->GetType() == user_manager::USER_TYPE_CHILD) { file = use_small ? &child_small_wallpaper_file_ : &child_large_wallpaper_file_; } else { file = use_small ? &default_small_wallpaper_file_ : &default_large_wallpaper_file_; } wallpaper::WallpaperLayout layout = use_small ? wallpaper::WALLPAPER_LAYOUT_CENTER : wallpaper::WALLPAPER_LAYOUT_CENTER_CROPPED; DCHECK(file); if (!default_wallpaper_image_.get() || default_wallpaper_image_->file_path() != *file) { default_wallpaper_image_.reset(); if (!file->empty()) { loaded_wallpapers_for_test_++; StartLoadAndSetDefaultWallpaper(*file, layout, std::move(on_finish), &default_wallpaper_image_); return; } CreateSolidDefaultWallpaper(); } if (default_wallpaper_image_->image().width() == 1 && default_wallpaper_image_->image().height() == 1) layout = wallpaper::WALLPAPER_LAYOUT_STRETCH; WallpaperInfo info(default_wallpaper_image_->file_path().value(), layout, wallpaper::DEFAULT, base::Time::Now().LocalMidnight()); SetWallpaper(default_wallpaper_image_->image(), info); } Commit Message: [reland] Do not set default wallpaper unless it should do so. [email protected], [email protected] Bug: 751382 Change-Id: Id0793dfe467f737526a95b1e66ed01fbb8860bda Reviewed-on: https://chromium-review.googlesource.com/619754 Commit-Queue: Xiaoqian Dai <[email protected]> Reviewed-by: Alexander Alekseev <[email protected]> Reviewed-by: Biao She <[email protected]> Cr-Original-Commit-Position: refs/heads/master@{#498325} Reviewed-on: https://chromium-review.googlesource.com/646430 Cr-Commit-Position: refs/heads/master@{#498982} CWE ID: CWE-200
void WallpaperManager::DoSetDefaultWallpaper( const AccountId& account_id, bool update_wallpaper, MovableOnDestroyCallbackHolder on_finish) { if (user_manager::UserManager::Get()->IsLoggedInAsKioskApp()) return; wallpaper_cache_.erase(account_id); WallpaperResolution resolution = GetAppropriateResolution(); const bool use_small = (resolution == WALLPAPER_RESOLUTION_SMALL); const base::FilePath* file = NULL; const user_manager::User* user = user_manager::UserManager::Get()->FindUser(account_id); if (user_manager::UserManager::Get()->IsLoggedInAsGuest()) { file = use_small ? &guest_small_wallpaper_file_ : &guest_large_wallpaper_file_; } else if (user && user->GetType() == user_manager::USER_TYPE_CHILD) { file = use_small ? &child_small_wallpaper_file_ : &child_large_wallpaper_file_; } else { file = use_small ? &default_small_wallpaper_file_ : &default_large_wallpaper_file_; } wallpaper::WallpaperLayout layout = use_small ? wallpaper::WALLPAPER_LAYOUT_CENTER : wallpaper::WALLPAPER_LAYOUT_CENTER_CROPPED; DCHECK(file); if (!default_wallpaper_image_.get() || default_wallpaper_image_->file_path() != *file) { default_wallpaper_image_.reset(); if (!file->empty()) { loaded_wallpapers_for_test_++; StartLoadAndSetDefaultWallpaper(*file, layout, update_wallpaper, std::move(on_finish), &default_wallpaper_image_); return; } CreateSolidDefaultWallpaper(); } if (update_wallpaper) { // 1x1 wallpaper is actually solid color, so it should be stretched. if (default_wallpaper_image_->image().width() == 1 && default_wallpaper_image_->image().height() == 1) { layout = wallpaper::WALLPAPER_LAYOUT_STRETCH; } WallpaperInfo info(default_wallpaper_image_->file_path().value(), layout, wallpaper::DEFAULT, base::Time::Now().LocalMidnight()); SetWallpaper(default_wallpaper_image_->image(), info); } }
171,966
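The fix in the record above threads an explicit update_wallpaper flag through the call so the preparation work still happens but the user-visible change is only applied when requested. A minimal C sketch of that guarded-side-effect shape, with invented names and none of Chromium's actual types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Prepare state unconditionally, but perform the user-visible action
     * only when the caller explicitly asked for it. */
    static void set_default_wallpaper(bool update_wallpaper)
    {
        /* ... load or cache the default image here ... */
        if (!update_wallpaper)
            return;                  /* prepared, but nothing shown */
        puts("wallpaper updated");   /* the guarded side effect */
    }

    int main(void)
    {
        set_default_wallpaper(false); /* e.g. pre-loading, no visible change */
        set_default_wallpaper(true);  /* an explicit request */
        return 0;
    }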
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: extern "C" int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle){ int ret; int i; int length = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *); const effect_descriptor_t *desc; ALOGV("\t\nEffectCreate start"); if (pHandle == NULL || uuid == NULL){ ALOGV("\tLVM_ERROR : EffectCreate() called with NULL pointer"); return -EINVAL; } for (i = 0; i < length; i++) { desc = gDescriptors[i]; if (memcmp(uuid, &desc->uuid, sizeof(effect_uuid_t)) == 0) { ALOGV("\tEffectCreate - UUID matched Reverb type %d, UUID = %x", i, desc->uuid.timeLow); break; } } if (i == length) { return -ENOENT; } ReverbContext *pContext = new ReverbContext; pContext->itfe = &gReverbInterface; pContext->hInstance = NULL; pContext->auxiliary = false; if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY){ pContext->auxiliary = true; ALOGV("\tEffectCreate - AUX"); }else{ ALOGV("\tEffectCreate - INS"); } pContext->preset = false; if (memcmp(&desc->type, SL_IID_PRESETREVERB, sizeof(effect_uuid_t)) == 0) { pContext->preset = true; pContext->curPreset = REVERB_PRESET_LAST + 1; pContext->nextPreset = REVERB_DEFAULT_PRESET; ALOGV("\tEffectCreate - PRESET"); }else{ ALOGV("\tEffectCreate - ENVIRONMENTAL"); } ALOGV("\tEffectCreate - Calling Reverb_init"); ret = Reverb_init(pContext); if (ret < 0){ ALOGV("\tLVM_ERROR : EffectCreate() init failed"); delete pContext; return ret; } *pHandle = (effect_handle_t)pContext; #ifdef LVM_PCM pContext->PcmInPtr = NULL; pContext->PcmOutPtr = NULL; pContext->PcmInPtr = fopen("/data/tmp/reverb_pcm_in.pcm", "w"); pContext->PcmOutPtr = fopen("/data/tmp/reverb_pcm_out.pcm", "w"); if((pContext->PcmInPtr == NULL)|| (pContext->PcmOutPtr == NULL)){ return -EINVAL; } #endif pContext->InFrames32 = (LVM_INT32 *)malloc(LVREV_MAX_FRAME_SIZE * sizeof(LVM_INT32) * 2); pContext->OutFrames32 = (LVM_INT32 *)malloc(LVREV_MAX_FRAME_SIZE * sizeof(LVM_INT32) * 2); ALOGV("\tEffectCreate %p, size %zu", pContext, sizeof(ReverbContext)); ALOGV("\tEffectCreate end\n"); return 0; } /* end EffectCreate */ Commit Message: audio effects: fix heap overflow Check consistency of effect command reply sizes before copying to reply address. Also add null pointer check on reply size. Also remove unused parameter warning. Bug: 21953516. Change-Id: I4cf00c12eaed696af28f3b7613f7e36f47a160c4 (cherry picked from commit 0f714a464d2425afe00d6450535e763131b40844) CWE ID: CWE-119
extern "C" int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId __unused, int32_t ioId __unused, effect_handle_t *pHandle){ int ret; int i; int length = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *); const effect_descriptor_t *desc; ALOGV("\t\nEffectCreate start"); if (pHandle == NULL || uuid == NULL){ ALOGV("\tLVM_ERROR : EffectCreate() called with NULL pointer"); return -EINVAL; } for (i = 0; i < length; i++) { desc = gDescriptors[i]; if (memcmp(uuid, &desc->uuid, sizeof(effect_uuid_t)) == 0) { ALOGV("\tEffectCreate - UUID matched Reverb type %d, UUID = %x", i, desc->uuid.timeLow); break; } } if (i == length) { return -ENOENT; } ReverbContext *pContext = new ReverbContext; pContext->itfe = &gReverbInterface; pContext->hInstance = NULL; pContext->auxiliary = false; if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY){ pContext->auxiliary = true; ALOGV("\tEffectCreate - AUX"); }else{ ALOGV("\tEffectCreate - INS"); } pContext->preset = false; if (memcmp(&desc->type, SL_IID_PRESETREVERB, sizeof(effect_uuid_t)) == 0) { pContext->preset = true; pContext->curPreset = REVERB_PRESET_LAST + 1; pContext->nextPreset = REVERB_DEFAULT_PRESET; ALOGV("\tEffectCreate - PRESET"); }else{ ALOGV("\tEffectCreate - ENVIRONMENTAL"); } ALOGV("\tEffectCreate - Calling Reverb_init"); ret = Reverb_init(pContext); if (ret < 0){ ALOGV("\tLVM_ERROR : EffectCreate() init failed"); delete pContext; return ret; } *pHandle = (effect_handle_t)pContext; #ifdef LVM_PCM pContext->PcmInPtr = NULL; pContext->PcmOutPtr = NULL; pContext->PcmInPtr = fopen("/data/tmp/reverb_pcm_in.pcm", "w"); pContext->PcmOutPtr = fopen("/data/tmp/reverb_pcm_out.pcm", "w"); if((pContext->PcmInPtr == NULL)|| (pContext->PcmOutPtr == NULL)){ return -EINVAL; } #endif pContext->InFrames32 = (LVM_INT32 *)malloc(LVREV_MAX_FRAME_SIZE * sizeof(LVM_INT32) * 2); pContext->OutFrames32 = (LVM_INT32 *)malloc(LVREV_MAX_FRAME_SIZE * sizeof(LVM_INT32) * 2); ALOGV("\tEffectCreate %p, size %zu", pContext, sizeof(ReverbContext)); ALOGV("\tEffectCreate end\n"); return 0; } /* end EffectCreate */
173,349
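The commit message in the record above says the real hardening was to check reply sizes (and the reply-size pointer) before copying into the caller's reply buffer; the shown function only gains __unused annotations. A minimal sketch of that bounds-check-before-copy pattern, with invented names rather than the audio-effect API:

    #include <stdint.h>
    #include <string.h>

    /* Copy a command reply out only after confirming the caller supplied a
     * reply buffer and a size field large enough to hold the data. */
    static int write_reply(void *reply, uint32_t *reply_size,
                           const void *src, uint32_t needed)
    {
        if (reply == NULL || reply_size == NULL)
            return -1;              /* no reply buffer at all */
        if (*reply_size < needed)
            return -1;              /* would overflow the caller's buffer */
        memcpy(reply, src, needed);
        *reply_size = needed;
        return 0;
    }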
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int create_user_ns(struct cred *new) { struct user_namespace *ns, *parent_ns = new->user_ns; kuid_t owner = new->euid; kgid_t group = new->egid; int ret; /* The creator needs a mapping in the parent user namespace * or else we won't be able to reasonably tell userspace who * created a user_namespace. */ if (!kuid_has_mapping(parent_ns, owner) || !kgid_has_mapping(parent_ns, group)) return -EPERM; ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL); if (!ns) return -ENOMEM; ret = proc_alloc_inum(&ns->proc_inum); if (ret) { kmem_cache_free(user_ns_cachep, ns); return ret; } atomic_set(&ns->count, 1); /* Leave the new->user_ns reference with the new user namespace. */ ns->parent = parent_ns; ns->owner = owner; ns->group = group; set_cred_user_ns(new, ns); return 0; } Commit Message: userns: Don't allow creation if the user is chrooted Guarantee that the policy of which files may be access that is established by setting the root directory will not be violated by user namespaces by verifying that the root directory points to the root of the mount namespace at the time of user namespace creation. Changing the root is a privileged operation, and as a matter of policy it serves to limit unprivileged processes to files below the current root directory. For reasons of simplicity and comprehensibility the privilege to change the root directory is gated solely on the CAP_SYS_CHROOT capability in the user namespace. Therefore when creating a user namespace we must ensure that the policy of which files may be access can not be violated by changing the root directory. Anyone who runs a processes in a chroot and would like to use user namespace can setup the same view of filesystems with a mount namespace instead. With this result that this is not a practical limitation for using user namespaces. Cc: [email protected] Acked-by: Serge Hallyn <[email protected]> Reported-by: Andy Lutomirski <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]> CWE ID: CWE-264
int create_user_ns(struct cred *new) { struct user_namespace *ns, *parent_ns = new->user_ns; kuid_t owner = new->euid; kgid_t group = new->egid; int ret; /* * Verify that we can not violate the policy of which files * may be accessed that is specified by the root directory, * by verifing that the root directory is at the root of the * mount namespace which allows all files to be accessed. */ if (current_chrooted()) return -EPERM; /* The creator needs a mapping in the parent user namespace * or else we won't be able to reasonably tell userspace who * created a user_namespace. */ if (!kuid_has_mapping(parent_ns, owner) || !kgid_has_mapping(parent_ns, group)) return -EPERM; ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL); if (!ns) return -ENOMEM; ret = proc_alloc_inum(&ns->proc_inum); if (ret) { kmem_cache_free(user_ns_cachep, ns); return ret; } atomic_set(&ns->count, 1); /* Leave the new->user_ns reference with the new user namespace. */ ns->parent = parent_ns; ns->owner = owner; ns->group = group; set_cred_user_ns(new, ns); return 0; }
166,097
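The fix above inserts a single policy gate: if the caller is chrooted, user-namespace creation is refused so the namespace cannot be used to escape the restricted filesystem view. A minimal sketch of that early-permission-check shape, assuming an invented task structure (not the kernel's):

    #include <errno.h>
    #include <stdbool.h>

    struct task { bool chrooted; };

    /* Refuse the privileged-looking operation before doing any allocation
     * when the caller's filesystem view has been restricted. */
    static int create_ns(const struct task *t)
    {
        if (t->chrooted)
            return -EPERM;   /* policy set by a more privileged user wins */
        /* ... allocate and initialise the namespace ... */
        return 0;
    }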
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) { struct k_itimer *timr; unsigned long flags; int si_private = 0; enum hrtimer_restart ret = HRTIMER_NORESTART; timr = container_of(timer, struct k_itimer, it.real.timer); spin_lock_irqsave(&timr->it_lock, flags); timr->it_active = 0; if (timr->it_interval != 0) si_private = ++timr->it_requeue_pending; if (posix_timer_event(timr, si_private)) { /* * signal was not sent because of sig_ignor * we will not get a call back to restart it AND * it should be restarted. */ if (timr->it_interval != 0) { ktime_t now = hrtimer_cb_get_time(timer); /* * FIXME: What we really want, is to stop this * timer completely and restart it in case the * SIG_IGN is removed. This is a non trivial * change which involves sighand locking * (sigh !), which we don't want to do late in * the release cycle. * * For now we just let timers with an interval * less than a jiffie expire every jiffie to * avoid softirq starvation in case of SIG_IGN * and a very small interval, which would put * the timer right back on the softirq pending * list. By moving now ahead of time we trick * hrtimer_forward() to expire the timer * later, while we still maintain the overrun * accuracy, but have some inconsistency in * the timer_gettime() case. This is at least * better than a starved softirq. A more * complex fix which solves also another related * inconsistency is already in the pipeline. */ #ifdef CONFIG_HIGH_RES_TIMERS { ktime_t kj = NSEC_PER_SEC / HZ; if (timr->it_interval < kj) now = ktime_add(now, kj); } #endif timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, timr->it_interval); ret = HRTIMER_RESTART; ++timr->it_requeue_pending; timr->it_active = 1; } } unlock_timer(timr, flags); return ret; } Commit Message: posix-timers: Sanitize overrun handling The posix timer overrun handling is broken because the forwarding functions can return a huge number of overruns which does not fit in an int. As a consequence timer_getoverrun(2) and siginfo::si_overrun can turn into random number generators. The k_clock::timer_forward() callbacks return a 64 bit value now. Make k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal accounting is correct. 3Remove the temporary (int) casts. Add a helper function which clamps the overrun value returned to user space via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value between 0 and INT_MAX. INT_MAX is an indicator for user space that the overrun value has been clamped. Reported-by: Team OWL337 <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Acked-by: John Stultz <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Michael Kerrisk <[email protected]> Link: https://lkml.kernel.org/r/[email protected] CWE ID: CWE-190
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) { struct k_itimer *timr; unsigned long flags; int si_private = 0; enum hrtimer_restart ret = HRTIMER_NORESTART; timr = container_of(timer, struct k_itimer, it.real.timer); spin_lock_irqsave(&timr->it_lock, flags); timr->it_active = 0; if (timr->it_interval != 0) si_private = ++timr->it_requeue_pending; if (posix_timer_event(timr, si_private)) { /* * signal was not sent because of sig_ignor * we will not get a call back to restart it AND * it should be restarted. */ if (timr->it_interval != 0) { ktime_t now = hrtimer_cb_get_time(timer); /* * FIXME: What we really want, is to stop this * timer completely and restart it in case the * SIG_IGN is removed. This is a non trivial * change which involves sighand locking * (sigh !), which we don't want to do late in * the release cycle. * * For now we just let timers with an interval * less than a jiffie expire every jiffie to * avoid softirq starvation in case of SIG_IGN * and a very small interval, which would put * the timer right back on the softirq pending * list. By moving now ahead of time we trick * hrtimer_forward() to expire the timer * later, while we still maintain the overrun * accuracy, but have some inconsistency in * the timer_gettime() case. This is at least * better than a starved softirq. A more * complex fix which solves also another related * inconsistency is already in the pipeline. */ #ifdef CONFIG_HIGH_RES_TIMERS { ktime_t kj = NSEC_PER_SEC / HZ; if (timr->it_interval < kj) now = ktime_add(now, kj); } #endif timr->it_overrun += hrtimer_forward(timer, now, timr->it_interval); ret = HRTIMER_RESTART; ++timr->it_requeue_pending; timr->it_active = 1; } } unlock_timer(timr, flags); return ret; }
169,182
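The commit message above describes keeping the overrun counter in a 64-bit field and clamping it to [0, INT_MAX] only at the user-space boundary, with INT_MAX acting as a "value was clamped" marker. A minimal sketch of that clamping helper, under the assumption of an invented name:

    #include <stdint.h>
    #include <limits.h>

    /* Internal bookkeeping stays 64-bit; narrow only when reporting
     * through an int-sized ABI such as timer_getoverrun(2). */
    static int overrun_to_user(int64_t overrun)
    {
        if (overrun < 0)
            return 0;
        if (overrun > INT_MAX)
            return INT_MAX;   /* signals to user space that clamping happened */
        return (int)overrun;
    }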
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: GF_Err gf_sm_load_init(GF_SceneLoader *load) { GF_Err e = GF_NOT_SUPPORTED; char *ext, szExt[50]; /*we need at least a scene graph*/ if (!load || (!load->ctx && !load->scene_graph) #ifndef GPAC_DISABLE_ISOM || (!load->fileName && !load->isom && !(load->flags & GF_SM_LOAD_FOR_PLAYBACK) ) #endif ) return GF_BAD_PARAM; if (!load->type) { #ifndef GPAC_DISABLE_ISOM if (load->isom) { load->type = GF_SM_LOAD_MP4; } else #endif { ext = (char *)strrchr(load->fileName, '.'); if (!ext) return GF_NOT_SUPPORTED; if (!stricmp(ext, ".gz")) { char *anext; ext[0] = 0; anext = (char *)strrchr(load->fileName, '.'); ext[0] = '.'; ext = anext; } strcpy(szExt, &ext[1]); strlwr(szExt); if (strstr(szExt, "bt")) load->type = GF_SM_LOAD_BT; else if (strstr(szExt, "wrl")) load->type = GF_SM_LOAD_VRML; else if (strstr(szExt, "x3dv")) load->type = GF_SM_LOAD_X3DV; #ifndef GPAC_DISABLE_LOADER_XMT else if (strstr(szExt, "xmt") || strstr(szExt, "xmta")) load->type = GF_SM_LOAD_XMTA; else if (strstr(szExt, "x3d")) load->type = GF_SM_LOAD_X3D; #endif else if (strstr(szExt, "swf")) load->type = GF_SM_LOAD_SWF; else if (strstr(szExt, "mov")) load->type = GF_SM_LOAD_QT; else if (strstr(szExt, "svg")) load->type = GF_SM_LOAD_SVG; else if (strstr(szExt, "xsr")) load->type = GF_SM_LOAD_XSR; else if (strstr(szExt, "xbl")) load->type = GF_SM_LOAD_XBL; else if (strstr(szExt, "xml")) { char *rtype = gf_xml_get_root_type(load->fileName, &e); if (rtype) { if (!strcmp(rtype, "SAFSession")) load->type = GF_SM_LOAD_XSR; else if (!strcmp(rtype, "XMT-A")) load->type = GF_SM_LOAD_XMTA; else if (!strcmp(rtype, "X3D")) load->type = GF_SM_LOAD_X3D; else if (!strcmp(rtype, "bindings")) load->type = GF_SM_LOAD_XBL; gf_free(rtype); } } } } if (!load->type) return e; if (!load->scene_graph) load->scene_graph = load->ctx->scene_graph; switch (load->type) { #ifndef GPAC_DISABLE_LOADER_BT case GF_SM_LOAD_BT: case GF_SM_LOAD_VRML: case GF_SM_LOAD_X3DV: return gf_sm_load_init_bt(load); #endif #ifndef GPAC_DISABLE_LOADER_XMT case GF_SM_LOAD_XMTA: case GF_SM_LOAD_X3D: return gf_sm_load_init_xmt(load); #endif #ifndef GPAC_DISABLE_SVG case GF_SM_LOAD_SVG: case GF_SM_LOAD_XSR: case GF_SM_LOAD_DIMS: return gf_sm_load_init_svg(load); case GF_SM_LOAD_XBL: e = gf_sm_load_init_xbl(load); load->process = gf_sm_load_run_xbl; load->done = gf_sm_load_done_xbl; return e; #endif #ifndef GPAC_DISABLE_SWF_IMPORT case GF_SM_LOAD_SWF: return gf_sm_load_init_swf(load); #endif #ifndef GPAC_DISABLE_LOADER_ISOM case GF_SM_LOAD_MP4: return gf_sm_load_init_isom(load); #endif #ifndef GPAC_DISABLE_QTVR case GF_SM_LOAD_QT: return gf_sm_load_init_qt(load); #endif default: return GF_NOT_SUPPORTED; } return GF_NOT_SUPPORTED; } Commit Message: fix some overflows due to strcpy fixes #1184, #1186, #1187 among other things CWE ID: CWE-119
GF_Err gf_sm_load_init(GF_SceneLoader *load) { GF_Err e = GF_NOT_SUPPORTED; char *ext, szExt[50]; /*we need at least a scene graph*/ if (!load || (!load->ctx && !load->scene_graph) #ifndef GPAC_DISABLE_ISOM || (!load->fileName && !load->isom && !(load->flags & GF_SM_LOAD_FOR_PLAYBACK) ) #endif ) return GF_BAD_PARAM; if (!load->type) { #ifndef GPAC_DISABLE_ISOM if (load->isom) { load->type = GF_SM_LOAD_MP4; } else #endif { ext = (char *)strrchr(load->fileName, '.'); if (!ext) return GF_NOT_SUPPORTED; if (!stricmp(ext, ".gz")) { char *anext; ext[0] = 0; anext = (char *)strrchr(load->fileName, '.'); ext[0] = '.'; ext = anext; } if (strlen(ext) < 2 || strlen(ext) > sizeof(szExt)) { GF_LOG(GF_LOG_ERROR, GF_LOG_SCENE, ("[Scene Manager] invalid extension in file name %s\n", load->fileName)); return GF_NOT_SUPPORTED; } strcpy(szExt, &ext[1]); strlwr(szExt); if (strstr(szExt, "bt")) load->type = GF_SM_LOAD_BT; else if (strstr(szExt, "wrl")) load->type = GF_SM_LOAD_VRML; else if (strstr(szExt, "x3dv")) load->type = GF_SM_LOAD_X3DV; #ifndef GPAC_DISABLE_LOADER_XMT else if (strstr(szExt, "xmt") || strstr(szExt, "xmta")) load->type = GF_SM_LOAD_XMTA; else if (strstr(szExt, "x3d")) load->type = GF_SM_LOAD_X3D; #endif else if (strstr(szExt, "swf")) load->type = GF_SM_LOAD_SWF; else if (strstr(szExt, "mov")) load->type = GF_SM_LOAD_QT; else if (strstr(szExt, "svg")) load->type = GF_SM_LOAD_SVG; else if (strstr(szExt, "xsr")) load->type = GF_SM_LOAD_XSR; else if (strstr(szExt, "xbl")) load->type = GF_SM_LOAD_XBL; else if (strstr(szExt, "xml")) { char *rtype = gf_xml_get_root_type(load->fileName, &e); if (rtype) { if (!strcmp(rtype, "SAFSession")) load->type = GF_SM_LOAD_XSR; else if (!strcmp(rtype, "XMT-A")) load->type = GF_SM_LOAD_XMTA; else if (!strcmp(rtype, "X3D")) load->type = GF_SM_LOAD_X3D; else if (!strcmp(rtype, "bindings")) load->type = GF_SM_LOAD_XBL; gf_free(rtype); } } } } if (!load->type) return e; if (!load->scene_graph) load->scene_graph = load->ctx->scene_graph; switch (load->type) { #ifndef GPAC_DISABLE_LOADER_BT case GF_SM_LOAD_BT: case GF_SM_LOAD_VRML: case GF_SM_LOAD_X3DV: return gf_sm_load_init_bt(load); #endif #ifndef GPAC_DISABLE_LOADER_XMT case GF_SM_LOAD_XMTA: case GF_SM_LOAD_X3D: return gf_sm_load_init_xmt(load); #endif #ifndef GPAC_DISABLE_SVG case GF_SM_LOAD_SVG: case GF_SM_LOAD_XSR: case GF_SM_LOAD_DIMS: return gf_sm_load_init_svg(load); case GF_SM_LOAD_XBL: e = gf_sm_load_init_xbl(load); load->process = gf_sm_load_run_xbl; load->done = gf_sm_load_done_xbl; return e; #endif #ifndef GPAC_DISABLE_SWF_IMPORT case GF_SM_LOAD_SWF: return gf_sm_load_init_swf(load); #endif #ifndef GPAC_DISABLE_LOADER_ISOM case GF_SM_LOAD_MP4: return gf_sm_load_init_isom(load); #endif #ifndef GPAC_DISABLE_QTVR case GF_SM_LOAD_QT: return gf_sm_load_init_qt(load); #endif default: return GF_NOT_SUPPORTED; } return GF_NOT_SUPPORTED; }
169,793
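The fix above adds a length check on the file extension before strcpy copies it into a 50-byte stack buffer. A minimal sketch of a bounded extension copy, with invented names rather than GPAC's:

    #include <string.h>

    /* Reject an extension that is too short to be useful or too long to
     * fit, instead of calling strcpy() unconditionally. */
    static int copy_ext(char *dst, size_t dst_size, const char *ext)
    {
        size_t len = strlen(ext);        /* ext includes the leading '.' */
        if (len < 2 || len >= dst_size)
            return -1;
        memcpy(dst, ext + 1, len);       /* len-1 characters plus the NUL */
        return 0;
    }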
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int do_new_mount(struct path *path, const char *fstype, int flags, int mnt_flags, const char *name, void *data) { struct file_system_type *type; struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; struct vfsmount *mnt; int err; if (!fstype) return -EINVAL; type = get_fs_type(fstype); if (!type) return -ENODEV; if (user_ns != &init_user_ns) { if (!(type->fs_flags & FS_USERNS_MOUNT)) { put_filesystem(type); return -EPERM; } /* Only in special cases allow devices from mounts * created outside the initial user namespace. */ if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) { flags |= MS_NODEV; mnt_flags |= MNT_NODEV; } } mnt = vfs_kern_mount(type, flags, name, data); if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) && !mnt->mnt_sb->s_subtype) mnt = fs_set_subtype(mnt, fstype); put_filesystem(type); if (IS_ERR(mnt)) return PTR_ERR(mnt); err = do_add_mount(real_mount(mnt), path, mnt_flags); if (err) mntput(mnt); return err; } Commit Message: mnt: Correct permission checks in do_remount While invesgiating the issue where in "mount --bind -oremount,ro ..." would result in later "mount --bind -oremount,rw" succeeding even if the mount started off locked I realized that there are several additional mount flags that should be locked and are not. In particular MNT_NOSUID, MNT_NODEV, MNT_NOEXEC, and the atime flags in addition to MNT_READONLY should all be locked. These flags are all per superblock, can all be changed with MS_BIND, and should not be changable if set by a more privileged user. The following additions to the current logic are added in this patch. - nosuid may not be clearable by a less privileged user. - nodev may not be clearable by a less privielged user. - noexec may not be clearable by a less privileged user. - atime flags may not be changeable by a less privileged user. The logic with atime is that always setting atime on access is a global policy and backup software and auditing software could break if atime bits are not updated (when they are configured to be updated), and serious performance degradation could result (DOS attack) if atime updates happen when they have been explicitly disabled. Therefore an unprivileged user should not be able to mess with the atime bits set by a more privileged user. The additional restrictions are implemented with the addition of MNT_LOCK_NOSUID, MNT_LOCK_NODEV, MNT_LOCK_NOEXEC, and MNT_LOCK_ATIME mnt flags. Taken together these changes and the fixes for MNT_LOCK_READONLY should make it safe for an unprivileged user to create a user namespace and to call "mount --bind -o remount,... ..." without the danger of mount flags being changed maliciously. Cc: [email protected] Acked-by: Serge E. Hallyn <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]> CWE ID: CWE-264
static int do_new_mount(struct path *path, const char *fstype, int flags, int mnt_flags, const char *name, void *data) { struct file_system_type *type; struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; struct vfsmount *mnt; int err; if (!fstype) return -EINVAL; type = get_fs_type(fstype); if (!type) return -ENODEV; if (user_ns != &init_user_ns) { if (!(type->fs_flags & FS_USERNS_MOUNT)) { put_filesystem(type); return -EPERM; } /* Only in special cases allow devices from mounts * created outside the initial user namespace. */ if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) { flags |= MS_NODEV; mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV; } } mnt = vfs_kern_mount(type, flags, name, data); if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) && !mnt->mnt_sb->s_subtype) mnt = fs_set_subtype(mnt, fstype); put_filesystem(type); if (IS_ERR(mnt)) return PTR_ERR(mnt); err = do_add_mount(real_mount(mnt), path, mnt_flags); if (err) mntput(mnt); return err; }
166,281
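The one-line change above pairs the forced MNT_NODEV restriction with a lock bit so a later remount by a less privileged user cannot clear it. A minimal sketch of that locked-flag pattern with invented flag values:

    #include <stdio.h>

    #define FL_NODEV       0x1
    #define FL_LOCK_NODEV  0x2

    /* A remount may not clear a restriction whose lock bit is set. */
    static int remount(unsigned int locked, unsigned int requested)
    {
        if ((locked & FL_LOCK_NODEV) && !(requested & FL_NODEV))
            return -1;   /* attempt to drop a locked restriction */
        return 0;
    }

    int main(void)
    {
        unsigned int flags = FL_NODEV | FL_LOCK_NODEV;  /* set at mount time */
        printf("clear nodev -> %d\n", remount(flags, 0));        /* rejected */
        printf("keep  nodev -> %d\n", remount(flags, FL_NODEV)); /* allowed  */
        return 0;
    }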
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int jas_iccgetuint32(jas_stream_t *in, jas_iccuint32_t *val) { ulonglong tmp; if (jas_iccgetuint(in, 4, &tmp)) return -1; *val = tmp; return 0; } Commit Message: The generation of the configuration file jas_config.h has been completely reworked in order to avoid pollution of the global namespace. Some problematic types like uchar, ulong, and friends have been replaced with names with a jas_ prefix. An option max_samples has been added to the BMP and JPEG decoders to restrict the maximum size of image that they can decode. This change was made as a (possibly temporary) fix to address security concerns. A max_samples command-line option has also been added to imginfo. Whether an image component (for jas_image_t) is stored in memory or on disk is now based on the component size (rather than the image size). Some debug log message were added. Some new integer overflow checks were added. Some new safe integer add/multiply functions were added. More pre-C99 cruft was removed. JasPer has numerous "hacks" to handle pre-C99 compilers. JasPer now assumes C99 support. So, this pre-C99 cruft is unnecessary and can be removed. The regression jasper-doublefree-mem_close.jpg has been re-enabled. Theoretically, it should work more predictably now. CWE ID: CWE-190
static int jas_iccgetuint32(jas_stream_t *in, jas_iccuint32_t *val) { jas_ulonglong tmp; if (jas_iccgetuint(in, 4, &tmp)) return -1; *val = tmp; return 0; }
168,686
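The record above reads a 32-bit field through a wider temporary (renamed to jas_ulonglong in the fix) before narrowing it. A minimal sketch of that accumulate-wide-then-assign shape for a big-endian 32-bit read, with invented names rather than JasPer's stream API:

    #include <stdint.h>
    #include <stddef.h>

    /* Accumulate the four bytes in a 64-bit temporary, then narrow once. */
    static int get_be_u32(const uint8_t *buf, size_t buflen, uint32_t *val)
    {
        uint64_t tmp = 0;
        if (buflen < 4)
            return -1;
        for (int i = 0; i < 4; i++)
            tmp = (tmp << 8) | buf[i];
        *val = (uint32_t)tmp;
        return 0;
    }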
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: WORD32 ih264d_mark_err_slice_skip(dec_struct_t * ps_dec, WORD32 num_mb_skip, UWORD8 u1_is_idr_slice, UWORD16 u2_frame_num, pocstruct_t *ps_cur_poc, WORD32 prev_slice_err) { WORD32 i2_cur_mb_addr; UWORD32 u1_num_mbs, u1_num_mbsNby2; UWORD32 u1_mb_idx = ps_dec->u1_mb_idx; UWORD32 i2_mb_skip_run; UWORD32 u1_num_mbs_next, u1_end_of_row; const UWORD32 i2_pic_wdin_mbs = ps_dec->u2_frm_wd_in_mbs; UWORD32 u1_slice_end; UWORD32 u1_tfr_n_mb; UWORD32 u1_decode_nmb; dec_bit_stream_t * const ps_bitstrm = ps_dec->ps_bitstrm; dec_slice_params_t * ps_slice = ps_dec->ps_cur_slice; UWORD32 *pu4_bitstrm_buf = ps_bitstrm->pu4_buffer; UWORD32 *pu4_bitstrm_ofst = &ps_bitstrm->u4_ofst; deblk_mb_t *ps_cur_deblk_mb; dec_mb_info_t *ps_cur_mb_info; parse_pmbarams_t *ps_parse_mb_data; UWORD32 u1_inter_mb_type; UWORD32 u1_deblk_mb_type; UWORD16 u2_total_mbs_coded; UWORD32 u1_mbaff = ps_slice->u1_mbaff_frame_flag; parse_part_params_t *ps_part_info; WORD32 ret; if(ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) { ih264d_err_pic_dispbuf_mgr(ps_dec); return 0; } if(ps_dec->ps_cur_slice->u1_mbaff_frame_flag && (num_mb_skip & 1)) { num_mb_skip++; } ps_dec->ps_dpb_cmds->u1_long_term_reference_flag = 0; if(prev_slice_err == 1) { /* first slice - missing/header corruption */ ps_dec->ps_cur_slice->u2_frame_num = u2_frame_num; if(!ps_dec->u1_first_slice_in_stream) { ih264d_end_of_pic(ps_dec, u1_is_idr_slice, ps_dec->ps_cur_slice->u2_frame_num); ps_dec->s_cur_pic_poc.u2_frame_num = ps_dec->ps_cur_slice->u2_frame_num; } { WORD32 i, j, poc = 0; ps_dec->ps_cur_slice->u2_first_mb_in_slice = 0; ps_dec->pf_mvpred = ih264d_mvpred_nonmbaff; ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_bp; ps_dec->p_motion_compensate = ih264d_motion_compensate_bp; if(ps_dec->ps_cur_pic != NULL) poc = ps_dec->ps_cur_pic->i4_poc + 2; j = -1; for(i = 0; i < MAX_NUM_PIC_PARAMS; i++) { if(ps_dec->ps_pps[i].u1_is_valid == TRUE) { if(ps_dec->ps_pps[i].ps_sps->u1_is_valid == TRUE) { j = i; break; } } } if(j == -1) { return ERROR_INV_SPS_PPS_T; } /* call ih264d_start_of_pic only if it was not called earlier*/ if(ps_dec->u4_pic_buf_got == 0) { ps_dec->ps_cur_slice->u1_slice_type = P_SLICE; ps_dec->ps_cur_slice->u1_nal_ref_idc = 1; ps_dec->ps_cur_slice->u1_nal_unit_type = 1; ret = ih264d_start_of_pic(ps_dec, poc, ps_cur_poc, ps_dec->ps_cur_slice->u2_frame_num, &ps_dec->ps_pps[j]); if(ret != OK) { return ret; } } ps_dec->ps_ref_pic_buf_lx[0][0]->u1_pic_buf_id = 0; ps_dec->u4_output_present = 0; { ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer, &(ps_dec->s_disp_op)); /* If error code is non-zero then there is no buffer available for display, hence avoid format conversion */ if(0 != ps_dec->s_disp_op.u4_error_code) { ps_dec->u4_fmt_conv_cur_row = ps_dec->s_disp_frame_info.u4_y_ht; } else ps_dec->u4_output_present = 1; } if(ps_dec->u1_separate_parse == 1) { if(ps_dec->u4_dec_thread_created == 0) { ithread_create(ps_dec->pv_dec_thread_handle, NULL, (void *)ih264d_decode_picture_thread, (void *)ps_dec); ps_dec->u4_dec_thread_created = 1; } if((ps_dec->u4_num_cores == 3) && ((ps_dec->u4_app_disable_deblk_frm == 0) || ps_dec->i1_recon_in_thread3_flag) && (ps_dec->u4_bs_deblk_thread_created == 0)) { ps_dec->u4_start_recon_deblk = 0; ithread_create(ps_dec->pv_bs_deblk_thread_handle, NULL, (void *)ih264d_recon_deblk_thread, (void *)ps_dec); ps_dec->u4_bs_deblk_thread_created = 1; } } } ps_dec->u4_first_slice_in_pic = 0; } else { dec_slice_struct_t *ps_parse_cur_slice; ps_parse_cur_slice = ps_dec->ps_dec_slice_buf + 
ps_dec->u2_cur_slice_num; if(ps_dec->u1_slice_header_done && ps_parse_cur_slice == ps_dec->ps_parse_cur_slice) { if((u1_mbaff) && (ps_dec->u4_num_mbs_cur_nmb & 1)) { ps_dec->u4_num_mbs_cur_nmb = ps_dec->u4_num_mbs_cur_nmb - 1; ps_dec->u2_cur_mb_addr--; } u1_num_mbs = ps_dec->u4_num_mbs_cur_nmb; if(u1_num_mbs) { ps_cur_mb_info = ps_dec->ps_nmb_info + u1_num_mbs - 1; } else { if(ps_dec->u1_separate_parse) { ps_cur_mb_info = ps_dec->ps_nmb_info; } else { ps_cur_mb_info = ps_dec->ps_nmb_info + ps_dec->u4_num_mbs_prev_nmb - 1; } } ps_dec->u2_mby = ps_cur_mb_info->u2_mby; ps_dec->u2_mbx = ps_cur_mb_info->u2_mbx; ps_dec->u1_mb_ngbr_availablity = ps_cur_mb_info->u1_mb_ngbr_availablity; if(u1_num_mbs) { ps_dec->pv_parse_tu_coeff_data = ps_dec->pv_prev_mb_parse_tu_coeff_data; ps_dec->u2_cur_mb_addr--; ps_dec->i4_submb_ofst -= SUB_BLK_SIZE; if (ps_dec->u1_pr_sl_type == P_SLICE || ps_dec->u1_pr_sl_type == B_SLICE) { ps_dec->pf_mvpred_ref_tfr_nby2mb(ps_dec, u1_mb_idx, u1_num_mbs); ps_dec->ps_part = ps_dec->ps_parse_part_params; } u1_num_mbs_next = i2_pic_wdin_mbs - ps_dec->u2_mbx - 1; u1_end_of_row = (!u1_num_mbs_next) && (!(u1_mbaff && (u1_num_mbs & 0x01))); u1_slice_end = 1; u1_tfr_n_mb = 1; ps_cur_mb_info->u1_end_of_slice = u1_slice_end; if(ps_dec->u1_separate_parse) { ih264d_parse_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs, u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row); ps_dec->ps_nmb_info += u1_num_mbs; } else { ih264d_decode_recon_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs, u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row); } ps_dec->u2_total_mbs_coded += u1_num_mbs; ps_dec->u1_mb_idx = 0; ps_dec->u4_num_mbs_cur_nmb = 0; } if(ps_dec->u2_total_mbs_coded >= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) { ps_dec->u1_pic_decode_done = 1; return 0; } /* Inserting new slice only if the current slice has atleast 1 MB*/ if(ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice < (UWORD32)(ps_dec->u2_total_mbs_coded >> ps_slice->u1_mbaff_frame_flag)) { ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; ps_dec->u2_cur_slice_num++; ps_dec->ps_parse_cur_slice++; } } else { ps_dec->ps_parse_cur_slice = ps_dec->ps_dec_slice_buf + ps_dec->u2_cur_slice_num; } } /******************************************************/ /* Initializations to new slice */ /******************************************************/ { WORD32 num_entries; WORD32 size; UWORD8 *pu1_buf; num_entries = MIN(MAX_FRAMES, ps_dec->u4_num_ref_frames_at_init); num_entries = 2 * ((2 * num_entries) + 1); size = num_entries * sizeof(void *); size += PAD_MAP_IDX_POC * sizeof(void *); pu1_buf = (UWORD8 *)ps_dec->pv_map_ref_idx_to_poc_buf; pu1_buf += size * ps_dec->u2_cur_slice_num; ps_dec->ps_parse_cur_slice->ppv_map_ref_idx_to_poc = (volatile void **)pu1_buf; } ps_dec->ps_cur_slice->u2_first_mb_in_slice = ps_dec->u2_total_mbs_coded >> u1_mbaff; ps_dec->ps_cur_slice->i1_slice_alpha_c0_offset = 0; ps_dec->ps_cur_slice->i1_slice_beta_offset = 0; if(ps_dec->ps_cur_slice->u1_field_pic_flag) ps_dec->u2_prv_frame_num = ps_dec->ps_cur_slice->u2_frame_num; ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice = ps_dec->u2_total_mbs_coded >> u1_mbaff; ps_dec->ps_parse_cur_slice->u2_log2Y_crwd = ps_dec->ps_cur_slice->u2_log2Y_crwd; if(ps_dec->u1_separate_parse) { ps_dec->ps_parse_cur_slice->pv_tu_coeff_data_start = ps_dec->pv_parse_tu_coeff_data; } else { ps_dec->pv_proc_tu_coeff_data = ps_dec->pv_parse_tu_coeff_data; } /******************************************************/ /* Initializations specific to P slice */ 
/******************************************************/ u1_inter_mb_type = P_MB; u1_deblk_mb_type = D_INTER_MB; ps_dec->ps_cur_slice->u1_slice_type = P_SLICE; ps_dec->ps_parse_cur_slice->slice_type = P_SLICE; ps_dec->pf_mvpred_ref_tfr_nby2mb = ih264d_mv_pred_ref_tfr_nby2_pmb; ps_dec->ps_part = ps_dec->ps_parse_part_params; ps_dec->u2_mbx = (MOD(ps_dec->ps_cur_slice->u2_first_mb_in_slice - 1, ps_dec->u2_frm_wd_in_mbs)); ps_dec->u2_mby = (DIV(ps_dec->ps_cur_slice->u2_first_mb_in_slice - 1, ps_dec->u2_frm_wd_in_mbs)); ps_dec->u2_mby <<= u1_mbaff; /******************************************************/ /* Parsing / decoding the slice */ /******************************************************/ ps_dec->u1_slice_header_done = 2; ps_dec->u1_qp = ps_slice->u1_slice_qp; ih264d_update_qp(ps_dec, 0); u1_mb_idx = ps_dec->u1_mb_idx; ps_parse_mb_data = ps_dec->ps_parse_mb_data; u1_num_mbs = u1_mb_idx; u1_slice_end = 0; u1_tfr_n_mb = 0; u1_decode_nmb = 0; u1_num_mbsNby2 = 0; i2_cur_mb_addr = ps_dec->u2_total_mbs_coded; i2_mb_skip_run = num_mb_skip; while(!u1_slice_end) { UWORD8 u1_mb_type; if(i2_cur_mb_addr > ps_dec->ps_cur_sps->u2_max_mb_addr) break; ps_cur_mb_info = ps_dec->ps_nmb_info + u1_num_mbs; ps_dec->u4_num_mbs_cur_nmb = u1_num_mbs; ps_cur_mb_info->u1_Mux = 0; ps_dec->u4_num_pmbair = (u1_num_mbs >> u1_mbaff); ps_cur_deblk_mb = ps_dec->ps_deblk_mbn + u1_num_mbs; ps_cur_mb_info->u1_end_of_slice = 0; /* Storing Default partition info */ ps_parse_mb_data->u1_num_part = 1; ps_parse_mb_data->u1_isI_mb = 0; /**************************************************************/ /* Get the required information for decoding of MB */ /**************************************************************/ /* mb_x, mb_y, neighbor availablity, */ if (u1_mbaff) ih264d_get_mb_info_cavlc_mbaff(ps_dec, i2_cur_mb_addr, ps_cur_mb_info, i2_mb_skip_run); else ih264d_get_mb_info_cavlc_nonmbaff(ps_dec, i2_cur_mb_addr, ps_cur_mb_info, i2_mb_skip_run); /* Set the deblocking parameters for this MB */ if(ps_dec->u4_app_disable_deblk_frm == 0) { ih264d_set_deblocking_parameters(ps_cur_deblk_mb, ps_slice, ps_dec->u1_mb_ngbr_availablity, ps_dec->u1_cur_mb_fld_dec_flag); } /* Set appropriate flags in ps_cur_mb_info and ps_dec */ ps_dec->i1_prev_mb_qp_delta = 0; ps_dec->u1_sub_mb_num = 0; ps_cur_mb_info->u1_mb_type = MB_SKIP; ps_cur_mb_info->u1_mb_mc_mode = PRED_16x16; ps_cur_mb_info->u1_cbp = 0; /* Storing Skip partition info */ ps_part_info = ps_dec->ps_part; ps_part_info->u1_is_direct = PART_DIRECT_16x16; ps_part_info->u1_sub_mb_num = 0; ps_dec->ps_part++; /* Update Nnzs */ ih264d_update_nnz_for_skipmb(ps_dec, ps_cur_mb_info, CAVLC); ps_cur_mb_info->ps_curmb->u1_mb_type = u1_inter_mb_type; ps_cur_deblk_mb->u1_mb_type |= u1_deblk_mb_type; i2_mb_skip_run--; ps_cur_deblk_mb->u1_mb_qp = ps_dec->u1_qp; if (u1_mbaff) { ih264d_update_mbaff_left_nnz(ps_dec, ps_cur_mb_info); } /**************************************************************/ /* Get next Macroblock address */ /**************************************************************/ i2_cur_mb_addr++; u1_num_mbs++; u1_num_mbsNby2++; ps_parse_mb_data++; /****************************************************************/ /* Check for End Of Row and other flags that determine when to */ /* do DMA setup for N/2-Mb, Decode for N-Mb, and Transfer for */ /* N-Mb */ /****************************************************************/ u1_num_mbs_next = i2_pic_wdin_mbs - ps_dec->u2_mbx - 1; u1_end_of_row = (!u1_num_mbs_next) && (!(u1_mbaff && (u1_num_mbs & 0x01))); u1_slice_end = !i2_mb_skip_run; 
u1_tfr_n_mb = (u1_num_mbs == ps_dec->u1_recon_mb_grp) || u1_end_of_row || u1_slice_end; u1_decode_nmb = u1_tfr_n_mb || u1_slice_end; ps_cur_mb_info->u1_end_of_slice = u1_slice_end; if(u1_decode_nmb) { ps_dec->pf_mvpred_ref_tfr_nby2mb(ps_dec, u1_mb_idx, u1_num_mbs); u1_num_mbsNby2 = 0; ps_parse_mb_data = ps_dec->ps_parse_mb_data; ps_dec->ps_part = ps_dec->ps_parse_part_params; if(ps_dec->u1_separate_parse) { ih264d_parse_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs, u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row); ps_dec->ps_nmb_info += u1_num_mbs; } else { ih264d_decode_recon_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs, u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row); } ps_dec->u2_total_mbs_coded += u1_num_mbs; if(u1_tfr_n_mb) u1_num_mbs = 0; u1_mb_idx = u1_num_mbs; ps_dec->u1_mb_idx = u1_num_mbs; } } ps_dec->u4_num_mbs_cur_nmb = 0; ps_dec->ps_cur_slice->u4_mbs_in_slice = i2_cur_mb_addr - ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice; H264_DEC_DEBUG_PRINT("Mbs in slice: %d\n", ps_dec->ps_cur_slice->u4_mbs_in_slice); /* incremented here only if first slice is inserted */ if(ps_dec->u4_first_slice_in_pic != 0) { ps_dec->ps_parse_cur_slice++; ps_dec->u2_cur_slice_num++; } ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; if(ps_dec->u2_total_mbs_coded >= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) { ps_dec->u1_pic_decode_done = 1; } return 0; } Commit Message: Decoder: Return correct error code for slice header errors Return ERROR_INV_SLICE_HDR_T instead of ERROR_INV_SPS_PPS_T for slice header errors. Bug: 34097915 Change-Id: I45d14a71f2322ff349058baaf65fb0f3c1140fba CWE ID:
WORD32 ih264d_mark_err_slice_skip(dec_struct_t * ps_dec, WORD32 num_mb_skip, UWORD8 u1_is_idr_slice, UWORD16 u2_frame_num, pocstruct_t *ps_cur_poc, WORD32 prev_slice_err) { WORD32 i2_cur_mb_addr; UWORD32 u1_num_mbs, u1_num_mbsNby2; UWORD32 u1_mb_idx = ps_dec->u1_mb_idx; UWORD32 i2_mb_skip_run; UWORD32 u1_num_mbs_next, u1_end_of_row; const UWORD32 i2_pic_wdin_mbs = ps_dec->u2_frm_wd_in_mbs; UWORD32 u1_slice_end; UWORD32 u1_tfr_n_mb; UWORD32 u1_decode_nmb; dec_bit_stream_t * const ps_bitstrm = ps_dec->ps_bitstrm; dec_slice_params_t * ps_slice = ps_dec->ps_cur_slice; UWORD32 *pu4_bitstrm_buf = ps_bitstrm->pu4_buffer; UWORD32 *pu4_bitstrm_ofst = &ps_bitstrm->u4_ofst; deblk_mb_t *ps_cur_deblk_mb; dec_mb_info_t *ps_cur_mb_info; parse_pmbarams_t *ps_parse_mb_data; UWORD32 u1_inter_mb_type; UWORD32 u1_deblk_mb_type; UWORD16 u2_total_mbs_coded; UWORD32 u1_mbaff = ps_slice->u1_mbaff_frame_flag; parse_part_params_t *ps_part_info; WORD32 ret; if(ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) { ih264d_err_pic_dispbuf_mgr(ps_dec); return 0; } if(ps_dec->ps_cur_slice->u1_mbaff_frame_flag && (num_mb_skip & 1)) { num_mb_skip++; } ps_dec->ps_dpb_cmds->u1_long_term_reference_flag = 0; if(prev_slice_err == 1) { /* first slice - missing/header corruption */ ps_dec->ps_cur_slice->u2_frame_num = u2_frame_num; if(!ps_dec->u1_first_slice_in_stream) { ih264d_end_of_pic(ps_dec, u1_is_idr_slice, ps_dec->ps_cur_slice->u2_frame_num); ps_dec->s_cur_pic_poc.u2_frame_num = ps_dec->ps_cur_slice->u2_frame_num; } { WORD32 i, j, poc = 0; ps_dec->ps_cur_slice->u2_first_mb_in_slice = 0; ps_dec->pf_mvpred = ih264d_mvpred_nonmbaff; ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_bp; ps_dec->p_motion_compensate = ih264d_motion_compensate_bp; if(ps_dec->ps_cur_pic != NULL) poc = ps_dec->ps_cur_pic->i4_poc + 2; j = -1; for(i = 0; i < MAX_NUM_PIC_PARAMS; i++) { if(ps_dec->ps_pps[i].u1_is_valid == TRUE) { if(ps_dec->ps_pps[i].ps_sps->u1_is_valid == TRUE) { j = i; break; } } } if(j == -1) { return ERROR_INV_SLICE_HDR_T; } /* call ih264d_start_of_pic only if it was not called earlier*/ if(ps_dec->u4_pic_buf_got == 0) { ps_dec->ps_cur_slice->u1_slice_type = P_SLICE; ps_dec->ps_cur_slice->u1_nal_ref_idc = 1; ps_dec->ps_cur_slice->u1_nal_unit_type = 1; ret = ih264d_start_of_pic(ps_dec, poc, ps_cur_poc, ps_dec->ps_cur_slice->u2_frame_num, &ps_dec->ps_pps[j]); if(ret != OK) { return ret; } } ps_dec->ps_ref_pic_buf_lx[0][0]->u1_pic_buf_id = 0; ps_dec->u4_output_present = 0; { ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer, &(ps_dec->s_disp_op)); /* If error code is non-zero then there is no buffer available for display, hence avoid format conversion */ if(0 != ps_dec->s_disp_op.u4_error_code) { ps_dec->u4_fmt_conv_cur_row = ps_dec->s_disp_frame_info.u4_y_ht; } else ps_dec->u4_output_present = 1; } if(ps_dec->u1_separate_parse == 1) { if(ps_dec->u4_dec_thread_created == 0) { ithread_create(ps_dec->pv_dec_thread_handle, NULL, (void *)ih264d_decode_picture_thread, (void *)ps_dec); ps_dec->u4_dec_thread_created = 1; } if((ps_dec->u4_num_cores == 3) && ((ps_dec->u4_app_disable_deblk_frm == 0) || ps_dec->i1_recon_in_thread3_flag) && (ps_dec->u4_bs_deblk_thread_created == 0)) { ps_dec->u4_start_recon_deblk = 0; ithread_create(ps_dec->pv_bs_deblk_thread_handle, NULL, (void *)ih264d_recon_deblk_thread, (void *)ps_dec); ps_dec->u4_bs_deblk_thread_created = 1; } } } ps_dec->u4_first_slice_in_pic = 0; } else { dec_slice_struct_t *ps_parse_cur_slice; ps_parse_cur_slice = ps_dec->ps_dec_slice_buf + ps_dec->u2_cur_slice_num; 
if(ps_dec->u1_slice_header_done && ps_parse_cur_slice == ps_dec->ps_parse_cur_slice) { if((u1_mbaff) && (ps_dec->u4_num_mbs_cur_nmb & 1)) { ps_dec->u4_num_mbs_cur_nmb = ps_dec->u4_num_mbs_cur_nmb - 1; ps_dec->u2_cur_mb_addr--; } u1_num_mbs = ps_dec->u4_num_mbs_cur_nmb; if(u1_num_mbs) { ps_cur_mb_info = ps_dec->ps_nmb_info + u1_num_mbs - 1; } else { if(ps_dec->u1_separate_parse) { ps_cur_mb_info = ps_dec->ps_nmb_info; } else { ps_cur_mb_info = ps_dec->ps_nmb_info + ps_dec->u4_num_mbs_prev_nmb - 1; } } ps_dec->u2_mby = ps_cur_mb_info->u2_mby; ps_dec->u2_mbx = ps_cur_mb_info->u2_mbx; ps_dec->u1_mb_ngbr_availablity = ps_cur_mb_info->u1_mb_ngbr_availablity; if(u1_num_mbs) { ps_dec->pv_parse_tu_coeff_data = ps_dec->pv_prev_mb_parse_tu_coeff_data; ps_dec->u2_cur_mb_addr--; ps_dec->i4_submb_ofst -= SUB_BLK_SIZE; if (ps_dec->u1_pr_sl_type == P_SLICE || ps_dec->u1_pr_sl_type == B_SLICE) { ps_dec->pf_mvpred_ref_tfr_nby2mb(ps_dec, u1_mb_idx, u1_num_mbs); ps_dec->ps_part = ps_dec->ps_parse_part_params; } u1_num_mbs_next = i2_pic_wdin_mbs - ps_dec->u2_mbx - 1; u1_end_of_row = (!u1_num_mbs_next) && (!(u1_mbaff && (u1_num_mbs & 0x01))); u1_slice_end = 1; u1_tfr_n_mb = 1; ps_cur_mb_info->u1_end_of_slice = u1_slice_end; if(ps_dec->u1_separate_parse) { ih264d_parse_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs, u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row); ps_dec->ps_nmb_info += u1_num_mbs; } else { ih264d_decode_recon_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs, u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row); } ps_dec->u2_total_mbs_coded += u1_num_mbs; ps_dec->u1_mb_idx = 0; ps_dec->u4_num_mbs_cur_nmb = 0; } if(ps_dec->u2_total_mbs_coded >= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) { ps_dec->u1_pic_decode_done = 1; return 0; } /* Inserting new slice only if the current slice has atleast 1 MB*/ if(ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice < (UWORD32)(ps_dec->u2_total_mbs_coded >> ps_slice->u1_mbaff_frame_flag)) { ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; ps_dec->u2_cur_slice_num++; ps_dec->ps_parse_cur_slice++; } } else { ps_dec->ps_parse_cur_slice = ps_dec->ps_dec_slice_buf + ps_dec->u2_cur_slice_num; } } /******************************************************/ /* Initializations to new slice */ /******************************************************/ { WORD32 num_entries; WORD32 size; UWORD8 *pu1_buf; num_entries = MIN(MAX_FRAMES, ps_dec->u4_num_ref_frames_at_init); num_entries = 2 * ((2 * num_entries) + 1); size = num_entries * sizeof(void *); size += PAD_MAP_IDX_POC * sizeof(void *); pu1_buf = (UWORD8 *)ps_dec->pv_map_ref_idx_to_poc_buf; pu1_buf += size * ps_dec->u2_cur_slice_num; ps_dec->ps_parse_cur_slice->ppv_map_ref_idx_to_poc = (volatile void **)pu1_buf; } ps_dec->ps_cur_slice->u2_first_mb_in_slice = ps_dec->u2_total_mbs_coded >> u1_mbaff; ps_dec->ps_cur_slice->i1_slice_alpha_c0_offset = 0; ps_dec->ps_cur_slice->i1_slice_beta_offset = 0; if(ps_dec->ps_cur_slice->u1_field_pic_flag) ps_dec->u2_prv_frame_num = ps_dec->ps_cur_slice->u2_frame_num; ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice = ps_dec->u2_total_mbs_coded >> u1_mbaff; ps_dec->ps_parse_cur_slice->u2_log2Y_crwd = ps_dec->ps_cur_slice->u2_log2Y_crwd; if(ps_dec->u1_separate_parse) { ps_dec->ps_parse_cur_slice->pv_tu_coeff_data_start = ps_dec->pv_parse_tu_coeff_data; } else { ps_dec->pv_proc_tu_coeff_data = ps_dec->pv_parse_tu_coeff_data; } /******************************************************/ /* Initializations specific to P slice */ /******************************************************/ 
u1_inter_mb_type = P_MB; u1_deblk_mb_type = D_INTER_MB; ps_dec->ps_cur_slice->u1_slice_type = P_SLICE; ps_dec->ps_parse_cur_slice->slice_type = P_SLICE; ps_dec->pf_mvpred_ref_tfr_nby2mb = ih264d_mv_pred_ref_tfr_nby2_pmb; ps_dec->ps_part = ps_dec->ps_parse_part_params; ps_dec->u2_mbx = (MOD(ps_dec->ps_cur_slice->u2_first_mb_in_slice - 1, ps_dec->u2_frm_wd_in_mbs)); ps_dec->u2_mby = (DIV(ps_dec->ps_cur_slice->u2_first_mb_in_slice - 1, ps_dec->u2_frm_wd_in_mbs)); ps_dec->u2_mby <<= u1_mbaff; /******************************************************/ /* Parsing / decoding the slice */ /******************************************************/ ps_dec->u1_slice_header_done = 2; ps_dec->u1_qp = ps_slice->u1_slice_qp; ih264d_update_qp(ps_dec, 0); u1_mb_idx = ps_dec->u1_mb_idx; ps_parse_mb_data = ps_dec->ps_parse_mb_data; u1_num_mbs = u1_mb_idx; u1_slice_end = 0; u1_tfr_n_mb = 0; u1_decode_nmb = 0; u1_num_mbsNby2 = 0; i2_cur_mb_addr = ps_dec->u2_total_mbs_coded; i2_mb_skip_run = num_mb_skip; while(!u1_slice_end) { UWORD8 u1_mb_type; if(i2_cur_mb_addr > ps_dec->ps_cur_sps->u2_max_mb_addr) break; ps_cur_mb_info = ps_dec->ps_nmb_info + u1_num_mbs; ps_dec->u4_num_mbs_cur_nmb = u1_num_mbs; ps_cur_mb_info->u1_Mux = 0; ps_dec->u4_num_pmbair = (u1_num_mbs >> u1_mbaff); ps_cur_deblk_mb = ps_dec->ps_deblk_mbn + u1_num_mbs; ps_cur_mb_info->u1_end_of_slice = 0; /* Storing Default partition info */ ps_parse_mb_data->u1_num_part = 1; ps_parse_mb_data->u1_isI_mb = 0; /**************************************************************/ /* Get the required information for decoding of MB */ /**************************************************************/ /* mb_x, mb_y, neighbor availablity, */ if (u1_mbaff) ih264d_get_mb_info_cavlc_mbaff(ps_dec, i2_cur_mb_addr, ps_cur_mb_info, i2_mb_skip_run); else ih264d_get_mb_info_cavlc_nonmbaff(ps_dec, i2_cur_mb_addr, ps_cur_mb_info, i2_mb_skip_run); /* Set the deblocking parameters for this MB */ if(ps_dec->u4_app_disable_deblk_frm == 0) { ih264d_set_deblocking_parameters(ps_cur_deblk_mb, ps_slice, ps_dec->u1_mb_ngbr_availablity, ps_dec->u1_cur_mb_fld_dec_flag); } /* Set appropriate flags in ps_cur_mb_info and ps_dec */ ps_dec->i1_prev_mb_qp_delta = 0; ps_dec->u1_sub_mb_num = 0; ps_cur_mb_info->u1_mb_type = MB_SKIP; ps_cur_mb_info->u1_mb_mc_mode = PRED_16x16; ps_cur_mb_info->u1_cbp = 0; /* Storing Skip partition info */ ps_part_info = ps_dec->ps_part; ps_part_info->u1_is_direct = PART_DIRECT_16x16; ps_part_info->u1_sub_mb_num = 0; ps_dec->ps_part++; /* Update Nnzs */ ih264d_update_nnz_for_skipmb(ps_dec, ps_cur_mb_info, CAVLC); ps_cur_mb_info->ps_curmb->u1_mb_type = u1_inter_mb_type; ps_cur_deblk_mb->u1_mb_type |= u1_deblk_mb_type; i2_mb_skip_run--; ps_cur_deblk_mb->u1_mb_qp = ps_dec->u1_qp; if (u1_mbaff) { ih264d_update_mbaff_left_nnz(ps_dec, ps_cur_mb_info); } /**************************************************************/ /* Get next Macroblock address */ /**************************************************************/ i2_cur_mb_addr++; u1_num_mbs++; u1_num_mbsNby2++; ps_parse_mb_data++; /****************************************************************/ /* Check for End Of Row and other flags that determine when to */ /* do DMA setup for N/2-Mb, Decode for N-Mb, and Transfer for */ /* N-Mb */ /****************************************************************/ u1_num_mbs_next = i2_pic_wdin_mbs - ps_dec->u2_mbx - 1; u1_end_of_row = (!u1_num_mbs_next) && (!(u1_mbaff && (u1_num_mbs & 0x01))); u1_slice_end = !i2_mb_skip_run; u1_tfr_n_mb = (u1_num_mbs == ps_dec->u1_recon_mb_grp) || 
u1_end_of_row || u1_slice_end; u1_decode_nmb = u1_tfr_n_mb || u1_slice_end; ps_cur_mb_info->u1_end_of_slice = u1_slice_end; if(u1_decode_nmb) { ps_dec->pf_mvpred_ref_tfr_nby2mb(ps_dec, u1_mb_idx, u1_num_mbs); u1_num_mbsNby2 = 0; ps_parse_mb_data = ps_dec->ps_parse_mb_data; ps_dec->ps_part = ps_dec->ps_parse_part_params; if(ps_dec->u1_separate_parse) { ih264d_parse_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs, u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row); ps_dec->ps_nmb_info += u1_num_mbs; } else { ih264d_decode_recon_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs, u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row); } ps_dec->u2_total_mbs_coded += u1_num_mbs; if(u1_tfr_n_mb) u1_num_mbs = 0; u1_mb_idx = u1_num_mbs; ps_dec->u1_mb_idx = u1_num_mbs; } } ps_dec->u4_num_mbs_cur_nmb = 0; ps_dec->ps_cur_slice->u4_mbs_in_slice = i2_cur_mb_addr - ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice; H264_DEC_DEBUG_PRINT("Mbs in slice: %d\n", ps_dec->ps_cur_slice->u4_mbs_in_slice); /* incremented here only if first slice is inserted */ if(ps_dec->u4_first_slice_in_pic != 0) { ps_dec->ps_parse_cur_slice++; ps_dec->u2_cur_slice_num++; } ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx; ps_dec->i2_prev_slice_mby = ps_dec->u2_mby; if(ps_dec->u2_total_mbs_coded >= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) { ps_dec->u1_pic_decode_done = 1; } return 0; }
174,042
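The whole change in the record above is the return value: a corrupt slice header now reports ERROR_INV_SLICE_HDR_T instead of being lumped in with missing SPS/PPS. A minimal sketch of keeping parse-stage errors distinct so callers can react differently, with invented codes:

    #include <stdio.h>

    enum dec_error {
        DEC_OK = 0,
        DEC_ERR_SPS_PPS,    /* no valid sequence/picture parameter set */
        DEC_ERR_SLICE_HDR,  /* parameter sets fine, slice header corrupt */
    };

    static const char *dec_strerror(enum dec_error e)
    {
        switch (e) {
        case DEC_OK:            return "ok";
        case DEC_ERR_SPS_PPS:   return "invalid SPS/PPS";
        case DEC_ERR_SLICE_HDR: return "invalid slice header";
        }
        return "unknown";
    }

    int main(void)
    {
        printf("%s\n", dec_strerror(DEC_ERR_SLICE_HDR));
        return 0;
    }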
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: std::string TestFlashMessageLoop::TestBasics() { message_loop_ = new pp::flash::MessageLoop(instance_); pp::CompletionCallback callback = callback_factory_.NewCallback( &TestFlashMessageLoop::QuitMessageLoopTask); pp::Module::Get()->core()->CallOnMainThread(0, callback); int32_t result = message_loop_->Run(); ASSERT_TRUE(message_loop_); delete message_loop_; message_loop_ = NULL; ASSERT_EQ(PP_OK, result); PASS(); } Commit Message: Fix PPB_Flash_MessageLoop. This CL suspends script callbacks and resource loads while running nested message loop using PPB_Flash_MessageLoop. BUG=569496 Review URL: https://codereview.chromium.org/1559113002 Cr-Commit-Position: refs/heads/master@{#374529} CWE ID: CWE-264
std::string TestFlashMessageLoop::TestBasics() { message_loop_ = new pp::flash::MessageLoop(instance_); pp::CompletionCallback callback = callback_factory_.NewCallback( &TestFlashMessageLoop::QuitMessageLoopTask); pp::Module::Get()->core()->CallOnMainThread(0, callback); int32_t result = message_loop_->Run(); ASSERT_TRUE(message_loop_); delete message_loop_; message_loop_ = nullptr; ASSERT_EQ(PP_OK, result); PASS(); }
172,126
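Per the commit message above, the substantive fix suspends script callbacks and resource loads while the nested PPB_Flash_MessageLoop runs; the test itself only swaps NULL for nullptr. A minimal sketch of a suspend-while-nested guard, with invented names and no Pepper types:

    #include <stdbool.h>
    #include <stdio.h>

    static int nesting;

    static bool callbacks_suspended(void) { return nesting > 0; }

    /* While the nested loop runs, re-entrant work is held back; it resumes
     * when the nested loop exits. */
    static void run_nested_loop(void (*body)(void))
    {
        nesting++;
        body();
        nesting--;
    }

    static void body(void) { printf("suspended=%d\n", callbacks_suspended()); }

    int main(void)
    {
        printf("suspended=%d\n", callbacks_suspended());
        run_nested_loop(body);
        printf("suspended=%d\n", callbacks_suspended());
        return 0;
    }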
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol, bool add_on_replace) { struct snd_ctl_elem_id id; unsigned int idx; struct snd_kcontrol *old; int ret; if (!kcontrol) return -EINVAL; if (snd_BUG_ON(!card || !kcontrol->info)) { ret = -EINVAL; goto error; } id = kcontrol->id; down_write(&card->controls_rwsem); old = snd_ctl_find_id(card, &id); if (!old) { if (add_on_replace) goto add; up_write(&card->controls_rwsem); ret = -EINVAL; goto error; } ret = snd_ctl_remove(card, old); if (ret < 0) { up_write(&card->controls_rwsem); goto error; } add: if (snd_ctl_find_hole(card, kcontrol->count) < 0) { up_write(&card->controls_rwsem); ret = -ENOMEM; goto error; } list_add_tail(&kcontrol->list, &card->controls); card->controls_count += kcontrol->count; kcontrol->id.numid = card->last_numid + 1; card->last_numid += kcontrol->count; up_write(&card->controls_rwsem); for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); return 0; error: snd_ctl_free_one(kcontrol); return ret; } Commit Message: ALSA: control: Don't access controls outside of protected regions A control that is visible on the card->controls list can be freed at any time. This means we must not access any of its memory while not holding the controls_rw_lock. Otherwise we risk a use after free access. Signed-off-by: Lars-Peter Clausen <[email protected]> Acked-by: Jaroslav Kysela <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]> CWE ID:
int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol, bool add_on_replace) { struct snd_ctl_elem_id id; unsigned int count; unsigned int idx; struct snd_kcontrol *old; int ret; if (!kcontrol) return -EINVAL; if (snd_BUG_ON(!card || !kcontrol->info)) { ret = -EINVAL; goto error; } id = kcontrol->id; down_write(&card->controls_rwsem); old = snd_ctl_find_id(card, &id); if (!old) { if (add_on_replace) goto add; up_write(&card->controls_rwsem); ret = -EINVAL; goto error; } ret = snd_ctl_remove(card, old); if (ret < 0) { up_write(&card->controls_rwsem); goto error; } add: if (snd_ctl_find_hole(card, kcontrol->count) < 0) { up_write(&card->controls_rwsem); ret = -ENOMEM; goto error; } list_add_tail(&kcontrol->list, &card->controls); card->controls_count += kcontrol->count; kcontrol->id.numid = card->last_numid + 1; card->last_numid += kcontrol->count; count = kcontrol->count; up_write(&card->controls_rwsem); for (idx = 0; idx < count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); return 0; error: snd_ctl_free_one(kcontrol); return ret; }
166,294
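The fix above snapshots kcontrol->count into a local variable while controls_rwsem is still held, then iterates over the local copy after the lock is dropped, since the control may be freed once it is visible on the list. A minimal sketch of that snapshot-under-lock pattern using pthreads and invented names:

    #include <pthread.h>
    #include <stdio.h>

    struct ctl { pthread_mutex_t lock; unsigned int count; };

    /* Copy what you still need while the lock is held, drop the lock, then
     * work only from the local copies. */
    static void notify_all(struct ctl *c)
    {
        unsigned int count;

        pthread_mutex_lock(&c->lock);
        count = c->count;             /* snapshot under the lock */
        pthread_mutex_unlock(&c->lock);

        for (unsigned int i = 0; i < count; i++)
            printf("notify %u\n", i); /* never touches shared state again */
    }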
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static EncodedJSValue JSC_HOST_CALL jsTestObjPrototypeFunctionOverloadedMethod7(ExecState* exec) { JSValue thisValue = exec->hostThisValue(); if (!thisValue.inherits(&JSTestObj::s_info)) return throwVMTypeError(exec); JSTestObj* castedThis = jsCast<JSTestObj*>(asObject(thisValue)); ASSERT_GC_OBJECT_INHERITS(castedThis, &JSTestObj::s_info); TestObj* impl = static_cast<TestObj*>(castedThis->impl()); if (exec->argumentCount() < 1) return throwVMError(exec, createTypeError(exec, "Not enough arguments")); DOMStringList* arrayArg(toDOMStringList(MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined))); if (exec->hadException()) return JSValue::encode(jsUndefined()); impl->overloadedMethod(arrayArg); return JSValue::encode(jsUndefined()); } Commit Message: [JSC] Implement a helper method createNotEnoughArgumentsError() https://bugs.webkit.org/show_bug.cgi?id=85102 Reviewed by Geoffrey Garen. In bug 84787, kbr@ requested to avoid hard-coding createTypeError(exec, "Not enough arguments") here and there. This patch implements createNotEnoughArgumentsError(exec) and uses it in JSC bindings. c.f. a corresponding bug for V8 bindings is bug 85097. Source/JavaScriptCore: * runtime/Error.cpp: (JSC::createNotEnoughArgumentsError): (JSC): * runtime/Error.h: (JSC): Source/WebCore: Test: bindings/scripts/test/TestObj.idl * bindings/scripts/CodeGeneratorJS.pm: Modified as described above. (GenerateArgumentsCountCheck): * bindings/js/JSDataViewCustom.cpp: Ditto. (WebCore::getDataViewMember): (WebCore::setDataViewMember): * bindings/js/JSDeprecatedPeerConnectionCustom.cpp: (WebCore::JSDeprecatedPeerConnectionConstructor::constructJSDeprecatedPeerConnection): * bindings/js/JSDirectoryEntryCustom.cpp: (WebCore::JSDirectoryEntry::getFile): (WebCore::JSDirectoryEntry::getDirectory): * bindings/js/JSSharedWorkerCustom.cpp: (WebCore::JSSharedWorkerConstructor::constructJSSharedWorker): * bindings/js/JSWebKitMutationObserverCustom.cpp: (WebCore::JSWebKitMutationObserverConstructor::constructJSWebKitMutationObserver): (WebCore::JSWebKitMutationObserver::observe): * bindings/js/JSWorkerCustom.cpp: (WebCore::JSWorkerConstructor::constructJSWorker): * bindings/scripts/test/JS/JSFloat64Array.cpp: Updated run-bindings-tests. 
(WebCore::jsFloat64ArrayPrototypeFunctionFoo): * bindings/scripts/test/JS/JSTestActiveDOMObject.cpp: (WebCore::jsTestActiveDOMObjectPrototypeFunctionExcitingFunction): (WebCore::jsTestActiveDOMObjectPrototypeFunctionPostMessage): * bindings/scripts/test/JS/JSTestCustomNamedGetter.cpp: (WebCore::jsTestCustomNamedGetterPrototypeFunctionAnotherFunction): * bindings/scripts/test/JS/JSTestEventTarget.cpp: (WebCore::jsTestEventTargetPrototypeFunctionItem): (WebCore::jsTestEventTargetPrototypeFunctionAddEventListener): (WebCore::jsTestEventTargetPrototypeFunctionRemoveEventListener): (WebCore::jsTestEventTargetPrototypeFunctionDispatchEvent): * bindings/scripts/test/JS/JSTestInterface.cpp: (WebCore::JSTestInterfaceConstructor::constructJSTestInterface): (WebCore::jsTestInterfacePrototypeFunctionSupplementalMethod2): * bindings/scripts/test/JS/JSTestMediaQueryListListener.cpp: (WebCore::jsTestMediaQueryListListenerPrototypeFunctionMethod): * bindings/scripts/test/JS/JSTestNamedConstructor.cpp: (WebCore::JSTestNamedConstructorNamedConstructor::constructJSTestNamedConstructor): * bindings/scripts/test/JS/JSTestObj.cpp: (WebCore::JSTestObjConstructor::constructJSTestObj): (WebCore::jsTestObjPrototypeFunctionVoidMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionIntMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionObjMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionMethodWithSequenceArg): (WebCore::jsTestObjPrototypeFunctionMethodReturningSequence): (WebCore::jsTestObjPrototypeFunctionMethodThatRequiresAllArgsAndThrows): (WebCore::jsTestObjPrototypeFunctionSerializedValue): (WebCore::jsTestObjPrototypeFunctionIdbKey): (WebCore::jsTestObjPrototypeFunctionOptionsObject): (WebCore::jsTestObjPrototypeFunctionAddEventListener): (WebCore::jsTestObjPrototypeFunctionRemoveEventListener): (WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndOptionalArg): (WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndTwoOptionalArgs): (WebCore::jsTestObjPrototypeFunctionMethodWithCallbackArg): (WebCore::jsTestObjPrototypeFunctionMethodWithNonCallbackArgAndCallbackArg): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod1): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod2): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod3): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod4): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod5): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod6): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod7): (WebCore::jsTestObjConstructorFunctionClassMethod2): (WebCore::jsTestObjConstructorFunctionOverloadedMethod11): (WebCore::jsTestObjConstructorFunctionOverloadedMethod12): (WebCore::jsTestObjPrototypeFunctionMethodWithUnsignedLongArray): (WebCore::jsTestObjPrototypeFunctionConvert1): (WebCore::jsTestObjPrototypeFunctionConvert2): (WebCore::jsTestObjPrototypeFunctionConvert3): (WebCore::jsTestObjPrototypeFunctionConvert4): (WebCore::jsTestObjPrototypeFunctionConvert5): (WebCore::jsTestObjPrototypeFunctionStrictFunction): * bindings/scripts/test/JS/JSTestSerializedScriptValueInterface.cpp: (WebCore::JSTestSerializedScriptValueInterfaceConstructor::constructJSTestSerializedScriptValueInterface): (WebCore::jsTestSerializedScriptValueInterfacePrototypeFunctionAcceptTransferList): git-svn-id: svn://svn.chromium.org/blink/trunk@115536 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-20
static EncodedJSValue JSC_HOST_CALL jsTestObjPrototypeFunctionOverloadedMethod7(ExecState* exec) { JSValue thisValue = exec->hostThisValue(); if (!thisValue.inherits(&JSTestObj::s_info)) return throwVMTypeError(exec); JSTestObj* castedThis = jsCast<JSTestObj*>(asObject(thisValue)); ASSERT_GC_OBJECT_INHERITS(castedThis, &JSTestObj::s_info); TestObj* impl = static_cast<TestObj*>(castedThis->impl()); if (exec->argumentCount() < 1) return throwVMError(exec, createNotEnoughArgumentsError(exec)); DOMStringList* arrayArg(toDOMStringList(MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined))); if (exec->hadException()) return JSValue::encode(jsUndefined()); impl->overloadedMethod(arrayArg); return JSValue::encode(jsUndefined()); }
170,606
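The change in this entry is a refactor of the argument-count guard: the hard-coded createTypeError(exec, "Not enough arguments") is replaced by the shared createNotEnoughArgumentsError(exec) helper that the commit introduces. A generic C sketch of the same idea, centralizing a repeated error value behind one helper (names are illustrative, not the WebKit API):

    #include <stdio.h>

    /* One place owns the wording of the error. */
    const char *not_enough_arguments_error(void)
    {
        return "Not enough arguments";
    }

    /* Call sites check the count and reuse the shared helper. */
    int call_with_args(int argc, int required)
    {
        if (argc < required) {
            fprintf(stderr, "%s\n", not_enough_arguments_error());
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        return call_with_args(0, 1) == -1 ? 0 : 1;
    }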
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void PopupContainer::showInRect(const IntRect& r, FrameView* v, int index) { listBox()->setBaseWidth(max(r.width() - kBorderSize * 2, 0)); listBox()->updateFromElement(); IntPoint location = v->contentsToWindow(r.location()); location.move(0, r.height()); m_originalFrameRect = IntRect(location, r.size()); setFrameRect(m_originalFrameRect); showPopup(v); } Commit Message: [REGRESSION] Refreshed autofill popup renders garbage https://bugs.webkit.org/show_bug.cgi?id=83255 http://code.google.com/p/chromium/issues/detail?id=118374 The code used to update only the PopupContainer coordinates as if they were the coordinates relative to the root view. Instead, a WebWidget positioned relative to the screen origin holds the PopupContainer, so it is the WebWidget that should be positioned in PopupContainer::refresh(), and the PopupContainer's location should be (0, 0) (and their sizes should always be equal). Reviewed by Kent Tamura. No new tests, as the popup appearance is not testable in WebKit. * platform/chromium/PopupContainer.cpp: (WebCore::PopupContainer::layoutAndCalculateWidgetRect): Variable renamed. (WebCore::PopupContainer::showPopup): Use m_originalFrameRect rather than frameRect() for passing into chromeClient. (WebCore::PopupContainer::showInRect): Set up the correct frameRect() for the container. (WebCore::PopupContainer::refresh): Resize the container and position the WebWidget correctly. * platform/chromium/PopupContainer.h: (PopupContainer): git-svn-id: svn://svn.chromium.org/blink/trunk@113418 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
void PopupContainer::showInRect(const IntRect& r, FrameView* v, int index) { listBox()->setBaseWidth(max(r.width() - kBorderSize * 2, 0)); listBox()->updateFromElement(); IntPoint location = v->contentsToWindow(r.location()); location.move(0, r.height()); m_originalFrameRect = IntRect(location, r.size()); // Position at (0, 0) since the frameRect().location() is relative to the parent WebWidget. setFrameRect(IntRect(IntPoint(), r.size())); showPopup(v); }
171,028
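Per the commit message, the bug was a coordinate-space mix-up: the enclosing WebWidget is positioned at the popup's window location, so the container's own frameRect() must start at (0, 0) with the requested size, while m_originalFrameRect keeps the window-relative rectangle. A small C sketch of converting between the two spaces, with invented types that only illustrate the bookkeeping:

    #include <assert.h>

    struct rect { int x, y, w, h; };

    /* A child rectangle expressed relative to a parent placed at the same spot:
     * the offset collapses to (0, 0) and only the size is kept. */
    struct rect to_parent_space(struct rect window_rect, struct rect parent_origin)
    {
        struct rect r = { window_rect.x - parent_origin.x,
                          window_rect.y - parent_origin.y,
                          window_rect.w, window_rect.h };
        return r;
    }

    int main(void)
    {
        struct rect window_rect = { 120, 80, 200, 150 };   /* popup in window coords */
        struct rect parent = window_rect;                  /* widget placed at the popup */
        struct rect local = to_parent_space(window_rect, parent);
        assert(local.x == 0 && local.y == 0 && local.w == 200 && local.h == 150);
        return 0;
    }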
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int handle_vmon(struct kvm_vcpu *vcpu) { int ret; gpa_t vmptr; struct page *page; struct vcpu_vmx *vmx = to_vmx(vcpu); const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; /* * The Intel VMX Instruction Reference lists a bunch of bits that are * prerequisite to running VMXON, most notably cr4.VMXE must be set to * 1 (see vmx_set_cr4() for when we allow the guest to set this). * Otherwise, we should fail with #UD. But most faulting conditions * have already been checked by hardware, prior to the VM-exit for * VMXON. We do test guest cr4.VMXE because processor CR4 always has * that bit set to 1 in non-root mode. */ if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (vmx->nested.vmxon) { nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); return kvm_skip_emulated_instruction(vcpu); } if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { kvm_inject_gp(vcpu, 0); return 1; } if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; /* * SDM 3: 24.11.5 * The first 4 bytes of VMXON region contain the supported * VMCS revision identifier * * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; * which replaces physical address width with 32 */ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } page = kvm_vcpu_gpa_to_page(vcpu, vmptr); if (is_error_page(page)) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } if (*(u32 *)kmap(page) != VMCS12_REVISION) { kunmap(page); kvm_release_page_clean(page); nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } kunmap(page); kvm_release_page_clean(page); vmx->nested.vmxon_ptr = vmptr; ret = enter_vmx_operation(vcpu); if (ret) return ret; nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } Commit Message: kvm: nVMX: Enforce cpl=0 for VMX instructions VMX instructions executed inside a L1 VM will always trigger a VM exit even when executed with cpl 3. This means we must perform the privilege check in software. Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks") Cc: [email protected] Signed-off-by: Felix Wilhelm <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]> CWE ID:
static int handle_vmon(struct kvm_vcpu *vcpu) { int ret; gpa_t vmptr; struct page *page; struct vcpu_vmx *vmx = to_vmx(vcpu); const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; /* * The Intel VMX Instruction Reference lists a bunch of bits that are * prerequisite to running VMXON, most notably cr4.VMXE must be set to * 1 (see vmx_set_cr4() for when we allow the guest to set this). * Otherwise, we should fail with #UD. But most faulting conditions * have already been checked by hardware, prior to the VM-exit for * VMXON. We do test guest cr4.VMXE because processor CR4 always has * that bit set to 1 in non-root mode. */ if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } /* CPL=0 must be checked manually. */ if (vmx_get_cpl(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (vmx->nested.vmxon) { nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); return kvm_skip_emulated_instruction(vcpu); } if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { kvm_inject_gp(vcpu, 0); return 1; } if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; /* * SDM 3: 24.11.5 * The first 4 bytes of VMXON region contain the supported * VMCS revision identifier * * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; * which replaces physical address width with 32 */ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } page = kvm_vcpu_gpa_to_page(vcpu, vmptr); if (is_error_page(page)) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } if (*(u32 *)kmap(page) != VMCS12_REVISION) { kunmap(page); kvm_release_page_clean(page); nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } kunmap(page); kvm_release_page_clean(page); vmx->nested.vmxon_ptr = vmptr; ret = enter_vmx_operation(vcpu); if (ret) return ret; nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); }
169,173
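The block added by the fix is a software privilege check: VMX instructions executed in a guest always trap to the hypervisor even at CPL 3, so the emulator must reject non-CPL-0 callers itself and raise #UD. A standalone C sketch of that gate, using placeholder names and error codes rather than the KVM API:

    #include <stdio.h>

    enum { OK = 0, ERR_UD = 6 };   /* 6 mirrors the x86 #UD vector, used here as a code */

    /* Only privilege level 0 may execute the emulated instruction. */
    int emulate_vmxon(int cpl)
    {
        if (cpl != 0)
            return ERR_UD;         /* refuse and signal #UD instead of emulating */
        /* ... the rest of the emulation would go here ... */
        return OK;
    }

    int main(void)
    {
        printf("cpl=3 -> %d, cpl=0 -> %d\n", emulate_vmxon(3), emulate_vmxon(0));
        return 0;
    }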
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int get_default_root(pool *p, int allow_symlinks, const char **root) { config_rec *c = NULL; const char *dir = NULL; int res; c = find_config(main_server->conf, CONF_PARAM, "DefaultRoot", FALSE); while (c != NULL) { pr_signals_handle(); /* Check the groups acl */ if (c->argc < 2) { dir = c->argv[0]; break; } res = pr_expr_eval_group_and(((char **) c->argv)+1); if (res) { dir = c->argv[0]; break; } c = find_config_next(c, c->next, CONF_PARAM, "DefaultRoot", FALSE); } if (dir != NULL) { const char *new_dir; /* Check for any expandable variables. */ new_dir = path_subst_uservar(p, &dir); if (new_dir != NULL) { dir = new_dir; } if (strncmp(dir, "/", 2) == 0) { dir = NULL; } else { char *realdir; int xerrno = 0; if (allow_symlinks == FALSE) { char *path, target_path[PR_TUNABLE_PATH_MAX + 1]; struct stat st; size_t pathlen; /* First, deal with any possible interpolation. dir_realpath() will * do this for us, but dir_realpath() ALSO automatically follows * symlinks, which is what we do NOT want to do here. */ path = pstrdup(p, dir); if (*path != '/') { if (*path == '~') { if (pr_fs_interpolate(dir, target_path, sizeof(target_path)-1) < 0) { return -1; } path = target_path; } } /* Note: lstat(2) is sensitive to the presence of a trailing slash on * the path, particularly in the case of a symlink to a directory. * Thus to get the correct test, we need to remove any trailing slash * that might be present. Subtle. */ pathlen = strlen(path); if (pathlen > 1 && path[pathlen-1] == '/') { path[pathlen-1] = '\0'; } pr_fs_clear_cache2(path); res = pr_fsio_lstat(path, &st); if (res < 0) { xerrno = errno; pr_log_pri(PR_LOG_WARNING, "error: unable to check %s: %s", path, strerror(xerrno)); errno = xerrno; return -1; } if (S_ISLNK(st.st_mode)) { pr_log_pri(PR_LOG_WARNING, "error: DefaultRoot %s is a symlink (denied by AllowChrootSymlinks " "config)", path); errno = EPERM; return -1; } } /* We need to be the final user here so that if the user has their home * directory with a mode the user proftpd is running (i.e. the User * directive) as can not traverse down, we can still have the default * root. */ pr_fs_clear_cache2(dir); PRIVS_USER realdir = dir_realpath(p, dir); xerrno = errno; PRIVS_RELINQUISH if (realdir) { dir = realdir; } else { /* Try to provide a more informative message. */ char interp_dir[PR_TUNABLE_PATH_MAX + 1]; memset(interp_dir, '\0', sizeof(interp_dir)); (void) pr_fs_interpolate(dir, interp_dir, sizeof(interp_dir)-1); pr_log_pri(PR_LOG_NOTICE, "notice: unable to use DefaultRoot '%s' [resolved to '%s']: %s", dir, interp_dir, strerror(xerrno)); errno = xerrno; } } } *root = dir; return 0; } Commit Message: Walk the entire DefaultRoot path, checking for symlinks of any component, when AllowChrootSymlinks is disabled. CWE ID: CWE-59
static int get_default_root(pool *p, int allow_symlinks, const char **root) { config_rec *c = NULL; const char *dir = NULL; int res; c = find_config(main_server->conf, CONF_PARAM, "DefaultRoot", FALSE); while (c != NULL) { pr_signals_handle(); /* Check the groups acl */ if (c->argc < 2) { dir = c->argv[0]; break; } res = pr_expr_eval_group_and(((char **) c->argv)+1); if (res) { dir = c->argv[0]; break; } c = find_config_next(c, c->next, CONF_PARAM, "DefaultRoot", FALSE); } if (dir != NULL) { const char *new_dir; /* Check for any expandable variables. */ new_dir = path_subst_uservar(p, &dir); if (new_dir != NULL) { dir = new_dir; } if (strncmp(dir, "/", 2) == 0) { dir = NULL; } else { char *realdir; int xerrno = 0; if (allow_symlinks == FALSE) { char *path, target_path[PR_TUNABLE_PATH_MAX + 1]; size_t pathlen; /* First, deal with any possible interpolation. dir_realpath() will * do this for us, but dir_realpath() ALSO automatically follows * symlinks, which is what we do NOT want to do here. */ path = pstrdup(p, dir); if (*path != '/') { if (*path == '~') { if (pr_fs_interpolate(dir, target_path, sizeof(target_path)-1) < 0) { return -1; } path = target_path; } } /* Note: lstat(2) is sensitive to the presence of a trailing slash on * the path, particularly in the case of a symlink to a directory. * Thus to get the correct test, we need to remove any trailing slash * that might be present. Subtle. */ pathlen = strlen(path); if (pathlen > 1 && path[pathlen-1] == '/') { path[pathlen-1] = '\0'; } res = is_symlink_path(p, path, pathlen); if (res < 0) { if (errno == EPERM) { pr_log_pri(PR_LOG_WARNING, "error: DefaultRoot %s is a symlink " "(denied by AllowChrootSymlinks config)", path); } errno = EPERM; return -1; } } /* We need to be the final user here so that if the user has their home * directory with a mode the user proftpd is running (i.e. the User * directive) as can not traverse down, we can still have the default * root. */ pr_fs_clear_cache2(dir); PRIVS_USER realdir = dir_realpath(p, dir); xerrno = errno; PRIVS_RELINQUISH if (realdir) { dir = realdir; } else { /* Try to provide a more informative message. */ char interp_dir[PR_TUNABLE_PATH_MAX + 1]; memset(interp_dir, '\0', sizeof(interp_dir)); (void) pr_fs_interpolate(dir, interp_dir, sizeof(interp_dir)-1); pr_log_pri(PR_LOG_NOTICE, "notice: unable to use DefaultRoot '%s' [resolved to '%s']: %s", dir, interp_dir, strerror(xerrno)); errno = xerrno; } } } *root = dir; return 0; }
168,278
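The fixed function hands the AllowChrootSymlinks test to an is_symlink_path() helper that, per the commit message, checks every component of the DefaultRoot path rather than only the final target. That helper's body is not shown in this entry; the function below is a hypothetical, simplified version of such a component-by-component lstat() walk, not ProFTPD's implementation:

    #include <string.h>
    #include <sys/stat.h>

    /* Returns 1 if any prefix of the absolute 'path' is a symlink, 0 if none is,
     * -1 on error.  'path' is not modified. */
    int any_component_is_symlink(const char *path)
    {
        char buf[4096];
        size_t len = strlen(path);
        struct stat st;

        if (len == 0 || len >= sizeof(buf) || path[0] != '/')
            return -1;

        for (size_t i = 1; i <= len; i++) {
            if (path[i] != '/' && path[i] != '\0')
                continue;
            memcpy(buf, path, i);
            buf[i] = '\0';                      /* prefix: "/a", "/a/b", ... */
            if (lstat(buf, &st) != 0)
                return -1;
            if (S_ISLNK(st.st_mode))
                return 1;                       /* reject on the first symlinked component */
        }
        return 0;
    }

    int main(void)
    {
        return any_component_is_symlink("/usr/bin") >= 0 ? 0 : 1;
    }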
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool TradQT_Manager::ParseCachedBoxes ( const MOOV_Manager & moovMgr ) { MOOV_Manager::BoxInfo udtaInfo; MOOV_Manager::BoxRef udtaRef = moovMgr.GetBox ( "moov/udta", &udtaInfo ); if ( udtaRef == 0 ) return false; for ( XMP_Uns32 i = 0; i < udtaInfo.childCount; ++i ) { MOOV_Manager::BoxInfo currInfo; MOOV_Manager::BoxRef currRef = moovMgr.GetNthChild ( udtaRef, i, &currInfo ); if ( currRef == 0 ) break; // Sanity check, should not happen. if ( (currInfo.boxType >> 24) != 0xA9 ) continue; if ( currInfo.contentSize < 2+2+1 ) continue; // Want enough for a non-empty value. InfoMapPos newInfo = this->parsedBoxes.insert ( this->parsedBoxes.end(), InfoMap::value_type ( currInfo.boxType, ParsedBoxInfo ( currInfo.boxType ) ) ); std::vector<ValueInfo> * newValues = &newInfo->second.values; XMP_Uns8 * boxPtr = (XMP_Uns8*) currInfo.content; XMP_Uns8 * boxEnd = boxPtr + currInfo.contentSize; XMP_Uns16 miniLen, macLang; for ( ; boxPtr < boxEnd-4; boxPtr += miniLen ) { miniLen = 4 + GetUns16BE ( boxPtr ); // ! Include header in local miniLen. macLang = GetUns16BE ( boxPtr+2); if ( (miniLen <= 4) || (miniLen > (boxEnd - boxPtr)) ) continue; // Ignore bad or empty values. XMP_StringPtr valuePtr = (char*)(boxPtr+4); size_t valueLen = miniLen - 4; newValues->push_back ( ValueInfo() ); ValueInfo * newValue = &newValues->back(); newValue->macLang = macLang; if ( IsMacLangKnown ( macLang ) ) newValue->xmpLang = GetXMPLang ( macLang ); newValue->macValue.assign ( valuePtr, valueLen ); } } return (! this->parsedBoxes.empty()); } // TradQT_Manager::ParseCachedBoxes Commit Message: CWE ID: CWE-835
bool TradQT_Manager::ParseCachedBoxes ( const MOOV_Manager & moovMgr ) { MOOV_Manager::BoxInfo udtaInfo; MOOV_Manager::BoxRef udtaRef = moovMgr.GetBox ( "moov/udta", &udtaInfo ); if ( udtaRef == 0 ) return false; for ( XMP_Uns32 i = 0; i < udtaInfo.childCount; ++i ) { MOOV_Manager::BoxInfo currInfo; MOOV_Manager::BoxRef currRef = moovMgr.GetNthChild ( udtaRef, i, &currInfo ); if ( currRef == 0 ) break; // Sanity check, should not happen. if ( (currInfo.boxType >> 24) != 0xA9 ) continue; if ( currInfo.contentSize < 2+2+1 ) continue; // Want enough for a non-empty value. InfoMapPos newInfo = this->parsedBoxes.insert ( this->parsedBoxes.end(), InfoMap::value_type ( currInfo.boxType, ParsedBoxInfo ( currInfo.boxType ) ) ); std::vector<ValueInfo> * newValues = &newInfo->second.values; XMP_Uns8 * boxPtr = (XMP_Uns8*) currInfo.content; XMP_Uns8 * boxEnd = boxPtr + currInfo.contentSize; XMP_Uns16 miniLen, macLang; for ( ; boxPtr < boxEnd-4; boxPtr += miniLen ) { miniLen = 4 + GetUns16BE ( boxPtr ); // ! Include header in local miniLen. macLang = GetUns16BE ( boxPtr+2); if ( (miniLen <= 4) || (miniLen > (boxEnd - boxPtr)) ) break; // Ignore bad or empty values. XMP_StringPtr valuePtr = (char*)(boxPtr+4); size_t valueLen = miniLen - 4; newValues->push_back ( ValueInfo() ); ValueInfo * newValue = &newValues->back(); newValue->macLang = macLang; if ( IsMacLangKnown ( macLang ) ) newValue->xmpLang = GetXMPLang ( macLang ); newValue->macValue.assign ( valuePtr, valueLen ); } } return (! this->parsedBoxes.empty()); } // TradQT_Manager::ParseCachedBoxes
165,364
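The only change in this fix is that a bad or oversized mini-item length now ends the inner scan (break) instead of being skipped (continue), so the parser stops at the first length it cannot trust rather than iterating onward over inconsistent data. A generic C sketch of that rule for a length-prefixed record stream (the record layout is invented for illustration):

    #include <stddef.h>
    #include <stdint.h>

    /* Each record: 2-byte big-endian payload length, then the payload.
     * Returns the number of well-formed records parsed. */
    size_t parse_records(const uint8_t *p, size_t n)
    {
        size_t count = 0;
        while (n >= 2) {
            size_t len = ((size_t)p[0] << 8) | p[1];
            size_t total = 2 + len;
            if (len == 0 || total > n)
                break;              /* bad length: stop, do not try to resync */
            /* ... consume p[2..total) here ... */
            p += total;
            n -= total;
            count++;
        }
        return count;
    }

    int main(void)
    {
        const uint8_t buf[] = { 0x00, 0x02, 'h', 'i', 0xFF, 0xFF };  /* one good record, then a bad length */
        return parse_records(buf, sizeof buf) == 1 ? 0 : 1;
    }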
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ff_jpeg2000_cleanup(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty) { int reslevelno, bandno, precno; for (reslevelno = 0; comp->reslevel && reslevelno < codsty->nreslevels; reslevelno++) { Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno; for (bandno = 0; bandno < reslevel->nbands; bandno++) { Jpeg2000Band *band = reslevel->band + bandno; for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++) { Jpeg2000Prec *prec = band->prec + precno; av_freep(&prec->zerobits); av_freep(&prec->cblkincl); av_freep(&prec->cblk); } av_freep(&band->prec); } av_freep(&reslevel->band); } ff_dwt_destroy(&comp->dwt); av_freep(&comp->reslevel); av_freep(&comp->i_data); av_freep(&comp->f_data); } Commit Message: jpeg2000: fix dereferencing invalid pointers Found-by: Laurent Butti <[email protected]> Signed-off-by: Michael Niedermayer <[email protected]> CWE ID:
void ff_jpeg2000_cleanup(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty) { int reslevelno, bandno, precno; for (reslevelno = 0; comp->reslevel && reslevelno < codsty->nreslevels; reslevelno++) { Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno; for (bandno = 0; bandno < reslevel->nbands; bandno++) { Jpeg2000Band *band = reslevel->band + bandno; for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++) { if (band->prec) { Jpeg2000Prec *prec = band->prec + precno; av_freep(&prec->zerobits); av_freep(&prec->cblkincl); av_freep(&prec->cblk); } } av_freep(&band->prec); } av_freep(&reslevel->band); } ff_dwt_destroy(&comp->dwt); av_freep(&comp->reslevel); av_freep(&comp->i_data); av_freep(&comp->f_data); }
165,921
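The fix wraps the per-precinct frees in an if (band->prec) guard so the cleanup path no longer indexes off a NULL base when the precinct array was never allocated. A tiny self-contained C sketch of the same guard on a partially initialized structure, with illustrative types:

    #include <stdlib.h>

    struct prec { int *cblk; };
    struct band { struct prec *prec; int nprec; };

    /* Safe even if b->prec was never allocated. */
    void band_cleanup(struct band *b)
    {
        if (b->prec) {                          /* guard before indexing */
            for (int i = 0; i < b->nprec; i++)
                free(b->prec[i].cblk);
        }
        free(b->prec);                          /* free(NULL) is a no-op */
        b->prec = NULL;
    }

    int main(void)
    {
        struct band b = { NULL, 4 };            /* allocation never happened */
        band_cleanup(&b);                       /* must not crash */
        return 0;
    }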
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: long Cluster::CreateBlockGroup(long long start_offset, long long size, long long discard_padding) { assert(m_entries); assert(m_entries_size > 0); assert(m_entries_count >= 0); assert(m_entries_count < m_entries_size); IMkvReader* const pReader = m_pSegment->m_pReader; long long pos = start_offset; const long long stop = start_offset + size; long long prev = 1; // nonce long long next = 0; // nonce long long duration = -1; // really, this is unsigned long long bpos = -1; long long bsize = -1; while (pos < stop) { long len; const long long id = ReadUInt(pReader, pos, len); assert(id >= 0); // TODO assert((pos + len) <= stop); pos += len; // consume ID const long long size = ReadUInt(pReader, pos, len); assert(size >= 0); // TODO assert((pos + len) <= stop); pos += len; // consume size if (id == 0x21) { // Block ID if (bpos < 0) { // Block ID bpos = pos; bsize = size; } } else if (id == 0x1B) { // Duration ID assert(size <= 8); duration = UnserializeUInt(pReader, pos, size); assert(duration >= 0); // TODO } else if (id == 0x7B) { // ReferenceBlock assert(size <= 8); const long size_ = static_cast<long>(size); long long time; long status = UnserializeInt(pReader, pos, size_, time); assert(status == 0); if (status != 0) return -1; if (time <= 0) // see note above prev = time; else // weird next = time; } pos += size; // consume payload assert(pos <= stop); } assert(pos == stop); assert(bpos >= 0); assert(bsize >= 0); const long idx = m_entries_count; BlockEntry** const ppEntry = m_entries + idx; BlockEntry*& pEntry = *ppEntry; pEntry = new (std::nothrow) BlockGroup(this, idx, bpos, bsize, prev, next, duration, discard_padding); if (pEntry == NULL) return -1; // generic error BlockGroup* const p = static_cast<BlockGroup*>(pEntry); const long status = p->Parse(); if (status == 0) { // success ++m_entries_count; return 0; } delete pEntry; pEntry = 0; return status; } Commit Message: external/libvpx/libwebm: Update snapshot Update libwebm snapshot. This update contains security fixes from upstream. Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b BUG=23167726 Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207 (cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a) CWE ID: CWE-20
long Cluster::CreateBlockGroup(long long start_offset, long long size, long long discard_padding) { assert(m_entries); assert(m_entries_size > 0); assert(m_entries_count >= 0); assert(m_entries_count < m_entries_size); IMkvReader* const pReader = m_pSegment->m_pReader; long long pos = start_offset; const long long stop = start_offset + size; long long prev = 1; // nonce long long next = 0; // nonce long long duration = -1; // really, this is unsigned long long bpos = -1; long long bsize = -1; while (pos < stop) { long len; const long long id = ReadID(pReader, pos, len); if (id < 0 || (pos + len) > stop) return E_FILE_FORMAT_INVALID; pos += len; // consume ID const long long size = ReadUInt(pReader, pos, len); assert(size >= 0); // TODO assert((pos + len) <= stop); pos += len; // consume size if (id == 0x21) { // Block ID if (bpos < 0) { // Block ID bpos = pos; bsize = size; } } else if (id == 0x1B) { // Duration ID if (size > 8) return E_FILE_FORMAT_INVALID; duration = UnserializeUInt(pReader, pos, size); if (duration < 0) return E_FILE_FORMAT_INVALID; } else if (id == 0x7B) { // ReferenceBlock if (size > 8 || size <= 0) return E_FILE_FORMAT_INVALID; const long size_ = static_cast<long>(size); long long time; long status = UnserializeInt(pReader, pos, size_, time); assert(status == 0); if (status != 0) return -1; if (time <= 0) // see note above prev = time; else // weird next = time; } pos += size; // consume payload if (pos > stop) return E_FILE_FORMAT_INVALID; } if (bpos < 0) return E_FILE_FORMAT_INVALID; if (pos != stop) return E_FILE_FORMAT_INVALID; assert(bsize >= 0); const long idx = m_entries_count; BlockEntry** const ppEntry = m_entries + idx; BlockEntry*& pEntry = *ppEntry; pEntry = new (std::nothrow) BlockGroup(this, idx, bpos, bsize, prev, next, duration, discard_padding); if (pEntry == NULL) return -1; // generic error BlockGroup* const p = static_cast<BlockGroup*>(pEntry); const long status = p->Parse(); if (status == 0) { // success ++m_entries_count; return 0; } delete pEntry; pEntry = 0; return status; }
173,806
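The pattern in this fix is replacing assert() on values read from the file (IDs, sizes, durations) with explicit validation that returns E_FILE_FORMAT_INVALID, so malformed input is rejected at runtime rather than relied on to satisfy an assert. A small C sketch of that shift, with a placeholder error code standing in for the real one:

    #include <stdint.h>

    #define E_FORMAT_INVALID (-2)

    /* Validate a length field read from untrusted input instead of asserting on it. */
    int check_element(int64_t size, int64_t pos, int64_t stop)
    {
        if (size < 0 || size > 8)          /* field wider than the format allows */
            return E_FORMAT_INVALID;
        if (pos + size > stop)             /* would run past the parent element */
            return E_FORMAT_INVALID;
        return 0;
    }

    int main(void)
    {
        return check_element(9, 0, 100) == E_FORMAT_INVALID ? 0 : 1;
    }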
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: mobility_opt_print(netdissect_options *ndo, const u_char *bp, const unsigned len) { unsigned i, optlen; for (i = 0; i < len; i += optlen) { ND_TCHECK(bp[i]); if (bp[i] == IP6MOPT_PAD1) optlen = 1; else { if (i + 1 < len) { ND_TCHECK(bp[i + 1]); optlen = bp[i + 1] + 2; } else goto trunc; } if (i + optlen > len) goto trunc; ND_TCHECK(bp[i + optlen]); switch (bp[i]) { case IP6MOPT_PAD1: ND_PRINT((ndo, "(pad1)")); break; case IP6MOPT_PADN: if (len - i < IP6MOPT_MINLEN) { ND_PRINT((ndo, "(padn: trunc)")); goto trunc; } ND_PRINT((ndo, "(padn)")); break; case IP6MOPT_REFRESH: if (len - i < IP6MOPT_REFRESH_MINLEN) { ND_PRINT((ndo, "(refresh: trunc)")); goto trunc; } /* units of 4 secs */ ND_PRINT((ndo, "(refresh: %u)", EXTRACT_16BITS(&bp[i+2]) << 2)); break; case IP6MOPT_ALTCOA: if (len - i < IP6MOPT_ALTCOA_MINLEN) { ND_PRINT((ndo, "(altcoa: trunc)")); goto trunc; } ND_PRINT((ndo, "(alt-CoA: %s)", ip6addr_string(ndo, &bp[i+2]))); break; case IP6MOPT_NONCEID: if (len - i < IP6MOPT_NONCEID_MINLEN) { ND_PRINT((ndo, "(ni: trunc)")); goto trunc; } ND_PRINT((ndo, "(ni: ho=0x%04x co=0x%04x)", EXTRACT_16BITS(&bp[i+2]), EXTRACT_16BITS(&bp[i+4]))); break; case IP6MOPT_AUTH: if (len - i < IP6MOPT_AUTH_MINLEN) { ND_PRINT((ndo, "(auth: trunc)")); goto trunc; } ND_PRINT((ndo, "(auth)")); break; default: if (len - i < IP6MOPT_MINLEN) { ND_PRINT((ndo, "(sopt_type %u: trunc)", bp[i])); goto trunc; } ND_PRINT((ndo, "(type-0x%02x: len=%u)", bp[i], bp[i + 1])); break; } } return 0; trunc: return 1; } Commit Message: CVE-2017-13023/IPv6 mobility: Add a bounds check before fetching data This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s), modified so the capture file won't cause 'tcpdump: pcap_loop: truncated dump file' CWE ID: CWE-125
mobility_opt_print(netdissect_options *ndo, const u_char *bp, const unsigned len) { unsigned i, optlen; for (i = 0; i < len; i += optlen) { ND_TCHECK(bp[i]); if (bp[i] == IP6MOPT_PAD1) optlen = 1; else { if (i + 1 < len) { ND_TCHECK(bp[i + 1]); optlen = bp[i + 1] + 2; } else goto trunc; } if (i + optlen > len) goto trunc; ND_TCHECK(bp[i + optlen]); switch (bp[i]) { case IP6MOPT_PAD1: ND_PRINT((ndo, "(pad1)")); break; case IP6MOPT_PADN: if (len - i < IP6MOPT_MINLEN) { ND_PRINT((ndo, "(padn: trunc)")); goto trunc; } ND_PRINT((ndo, "(padn)")); break; case IP6MOPT_REFRESH: if (len - i < IP6MOPT_REFRESH_MINLEN) { ND_PRINT((ndo, "(refresh: trunc)")); goto trunc; } /* units of 4 secs */ ND_TCHECK_16BITS(&bp[i+2]); ND_PRINT((ndo, "(refresh: %u)", EXTRACT_16BITS(&bp[i+2]) << 2)); break; case IP6MOPT_ALTCOA: if (len - i < IP6MOPT_ALTCOA_MINLEN) { ND_PRINT((ndo, "(altcoa: trunc)")); goto trunc; } ND_PRINT((ndo, "(alt-CoA: %s)", ip6addr_string(ndo, &bp[i+2]))); break; case IP6MOPT_NONCEID: if (len - i < IP6MOPT_NONCEID_MINLEN) { ND_PRINT((ndo, "(ni: trunc)")); goto trunc; } ND_PRINT((ndo, "(ni: ho=0x%04x co=0x%04x)", EXTRACT_16BITS(&bp[i+2]), EXTRACT_16BITS(&bp[i+4]))); break; case IP6MOPT_AUTH: if (len - i < IP6MOPT_AUTH_MINLEN) { ND_PRINT((ndo, "(auth: trunc)")); goto trunc; } ND_PRINT((ndo, "(auth)")); break; default: if (len - i < IP6MOPT_MINLEN) { ND_PRINT((ndo, "(sopt_type %u: trunc)", bp[i])); goto trunc; } ND_PRINT((ndo, "(type-0x%02x: len=%u)", bp[i], bp[i + 1])); break; } } return 0; trunc: return 1; }
167,868
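The one-line fix adds ND_TCHECK_16BITS on the refresh field before the 16-bit extract, i.e. it verifies the two bytes are inside the captured data before reading them. A generic C sketch of checking before reading a 2-byte big-endian field, with invented helper names rather than tcpdump's macros:

    #include <stddef.h>
    #include <stdint.h>

    /* Returns 0 and writes *out if the 2 bytes at 'off' lie within 'caplen',
     * otherwise returns -1 without reading past the capture. */
    int read_be16_checked(const uint8_t *pkt, size_t caplen, size_t off, uint16_t *out)
    {
        if (off > caplen || caplen - off < 2)   /* bounds check before the read */
            return -1;
        *out = (uint16_t)((pkt[off] << 8) | pkt[off + 1]);
        return 0;
    }

    int main(void)
    {
        const uint8_t pkt[] = { 0x01, 0x02, 0x03 };
        uint16_t v;
        return read_be16_checked(pkt, sizeof pkt, 2, &v) == -1 ? 0 : 1;  /* only 1 byte left: rejected */
    }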
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: emit_string(const char *str, FILE *out) /* Print a string with spaces replaced by '_' and non-printing characters by * an octal escape. */ { for (; *str; ++str) if (isgraph(UCHAR_MAX & *str)) putc(*str, out); else if (isspace(UCHAR_MAX & *str)) putc('_', out); else fprintf(out, "\\%.3o", *str); } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
emit_string(const char *str, FILE *out) /* Print a string with spaces replaced by '_' and non-printing characters by * an octal escape. */ { for (; *str; ++str) if (isgraph(UCHAR_MAX & *str)) putc(*str, out); else if (isspace(UCHAR_MAX & *str)) putc('_', out); else fprintf(out, "\\%.3o", *str); }
173,731
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: size_t jsuGetFreeStack() { #ifdef ARM void *frame = __builtin_frame_address(0); size_t stackPos = (size_t)((char*)frame); size_t stackEnd = (size_t)((char*)&LINKER_END_VAR); if (stackPos < stackEnd) return 0; // should never happen, but just in case of overflow! return stackPos - stackEnd; #elif defined(LINUX) char ptr; // this is on the stack extern void *STACK_BASE; uint32_t count = (uint32_t)((size_t)STACK_BASE - (size_t)&ptr); return 1000000 - count; // give it 1 megabyte of stack #else return 1000000; // no stack depth check on this platform #endif } Commit Message: Fix stack size detection on Linux (fix #1427) CWE ID: CWE-190
size_t jsuGetFreeStack() { #ifdef ARM void *frame = __builtin_frame_address(0); size_t stackPos = (size_t)((char*)frame); size_t stackEnd = (size_t)((char*)&LINKER_END_VAR); if (stackPos < stackEnd) return 0; // should never happen, but just in case of overflow! return stackPos - stackEnd; #elif defined(LINUX) char ptr; // this is on the stack extern void *STACK_BASE; uint32_t count = (uint32_t)((size_t)STACK_BASE - (size_t)&ptr); const uint32_t max_stack = 1000000; // give it 1 megabyte of stack if (count>max_stack) return 0; return max_stack - count; #else return 1000000; // no stack depth check on this platform #endif }
169,218
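The Linux branch of the fix clamps the measured stack usage against the 1 MB budget before subtracting, so max_stack - count can no longer wrap around when the probe address falls outside the expected region. A standalone C sketch of that saturating subtraction:

    #include <stdint.h>
    #include <stdio.h>

    /* Free budget = limit - used, saturating at 0 instead of wrapping. */
    uint32_t free_budget(uint32_t limit, uint32_t used)
    {
        if (used > limit)
            return 0;
        return limit - used;
    }

    int main(void)
    {
        printf("%u %u\n", free_budget(1000000u, 250u), free_budget(1000000u, 2000000u));
        return 0;   /* prints 999750 0 */
    }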
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: IHEVCD_ERROR_T ihevcd_parse_slice_data(codec_t *ps_codec) { IHEVCD_ERROR_T ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; WORD32 end_of_slice_flag = 0; sps_t *ps_sps; pps_t *ps_pps; slice_header_t *ps_slice_hdr; WORD32 end_of_pic; tile_t *ps_tile, *ps_tile_prev; WORD32 i; WORD32 ctb_addr; WORD32 tile_idx; WORD32 cabac_init_idc; WORD32 ctb_size; WORD32 num_ctb_in_row; WORD32 num_min4x4_in_ctb; WORD32 slice_qp; WORD32 slice_start_ctb_idx; WORD32 tile_start_ctb_idx; ps_slice_hdr = ps_codec->s_parse.ps_slice_hdr_base; ps_pps = ps_codec->s_parse.ps_pps_base; ps_sps = ps_codec->s_parse.ps_sps_base; /* Get current slice header, pps and sps */ ps_slice_hdr += (ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1)); ps_pps += ps_slice_hdr->i1_pps_id; ps_sps += ps_pps->i1_sps_id; if(0 != ps_codec->s_parse.i4_cur_slice_idx) { if(!ps_slice_hdr->i1_dependent_slice_flag) { ps_codec->s_parse.i4_cur_independent_slice_idx++; if(MAX_SLICE_HDR_CNT == ps_codec->s_parse.i4_cur_independent_slice_idx) ps_codec->s_parse.i4_cur_independent_slice_idx = 0; } } ctb_size = 1 << ps_sps->i1_log2_ctb_size; num_min4x4_in_ctb = (ctb_size / 4) * (ctb_size / 4); num_ctb_in_row = ps_sps->i2_pic_wd_in_ctb; /* Update the parse context */ if(0 == ps_codec->i4_slice_error) { ps_codec->s_parse.i4_ctb_x = ps_slice_hdr->i2_ctb_x; ps_codec->s_parse.i4_ctb_y = ps_slice_hdr->i2_ctb_y; } ps_codec->s_parse.ps_pps = ps_pps; ps_codec->s_parse.ps_sps = ps_sps; ps_codec->s_parse.ps_slice_hdr = ps_slice_hdr; /* Derive Tile positions for the current CTB */ /* Change this to lookup if required */ ihevcd_get_tile_pos(ps_pps, ps_sps, ps_codec->s_parse.i4_ctb_x, ps_codec->s_parse.i4_ctb_y, &ps_codec->s_parse.i4_ctb_tile_x, &ps_codec->s_parse.i4_ctb_tile_y, &tile_idx); ps_codec->s_parse.ps_tile = ps_pps->ps_tile + tile_idx; ps_codec->s_parse.i4_cur_tile_idx = tile_idx; ps_tile = ps_codec->s_parse.ps_tile; if(tile_idx) ps_tile_prev = ps_tile - 1; else ps_tile_prev = ps_tile; /* If the present slice is dependent, then store the previous * independent slices' ctb x and y values for decoding process */ if(0 == ps_codec->i4_slice_error) { if(1 == ps_slice_hdr->i1_dependent_slice_flag) { /*If slice is present at the start of a new tile*/ if((0 == ps_codec->s_parse.i4_ctb_tile_x) && (0 == ps_codec->s_parse.i4_ctb_tile_y)) { ps_codec->s_parse.i4_ctb_slice_x = 0; ps_codec->s_parse.i4_ctb_slice_y = 0; } } if(!ps_slice_hdr->i1_dependent_slice_flag) { ps_codec->s_parse.i4_ctb_slice_x = 0; ps_codec->s_parse.i4_ctb_slice_y = 0; } } /* Frame level initializations */ if((0 == ps_codec->s_parse.i4_ctb_y) && (0 == ps_codec->s_parse.i4_ctb_x)) { ret = ihevcd_parse_pic_init(ps_codec); RETURN_IF((ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS), ret); ps_codec->s_parse.pu4_pic_tu_idx[0] = 0; ps_codec->s_parse.pu4_pic_pu_idx[0] = 0; ps_codec->s_parse.i4_cur_independent_slice_idx = 0; ps_codec->s_parse.i4_ctb_tile_x = 0; ps_codec->s_parse.i4_ctb_tile_y = 0; } { /* Updating the poc list of current slice to ps_mv_buf */ mv_buf_t *ps_mv_buf = ps_codec->s_parse.ps_cur_mv_buf; if(ps_slice_hdr->i1_num_ref_idx_l1_active != 0) { for(i = 0; i < ps_slice_hdr->i1_num_ref_idx_l1_active; i++) { ps_mv_buf->l1_collocated_poc[(ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1))][i] = ((pic_buf_t *)ps_slice_hdr->as_ref_pic_list1[i].pv_pic_buf)->i4_abs_poc; ps_mv_buf->u1_l1_collocated_poc_lt[(ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1))][i] = ((pic_buf_t *)ps_slice_hdr->as_ref_pic_list1[i].pv_pic_buf)->u1_used_as_ref; } } if(ps_slice_hdr->i1_num_ref_idx_l0_active != 
0) { for(i = 0; i < ps_slice_hdr->i1_num_ref_idx_l0_active; i++) { ps_mv_buf->l0_collocated_poc[(ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1))][i] = ((pic_buf_t *)ps_slice_hdr->as_ref_pic_list0[i].pv_pic_buf)->i4_abs_poc; ps_mv_buf->u1_l0_collocated_poc_lt[(ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1))][i] = ((pic_buf_t *)ps_slice_hdr->as_ref_pic_list0[i].pv_pic_buf)->u1_used_as_ref; } } } /*Initialize the low delay flag at the beginning of every slice*/ if((0 == ps_codec->s_parse.i4_ctb_slice_x) || (0 == ps_codec->s_parse.i4_ctb_slice_y)) { /* Lowdelay flag */ WORD32 cur_poc, ref_list_poc, flag = 1; cur_poc = ps_slice_hdr->i4_abs_pic_order_cnt; for(i = 0; i < ps_slice_hdr->i1_num_ref_idx_l0_active; i++) { ref_list_poc = ((mv_buf_t *)ps_slice_hdr->as_ref_pic_list0[i].pv_mv_buf)->i4_abs_poc; if(ref_list_poc > cur_poc) { flag = 0; break; } } if(flag && (ps_slice_hdr->i1_slice_type == BSLICE)) { for(i = 0; i < ps_slice_hdr->i1_num_ref_idx_l1_active; i++) { ref_list_poc = ((mv_buf_t *)ps_slice_hdr->as_ref_pic_list1[i].pv_mv_buf)->i4_abs_poc; if(ref_list_poc > cur_poc) { flag = 0; break; } } } ps_slice_hdr->i1_low_delay_flag = flag; } /* initialize the cabac init idc based on slice type */ if(ps_slice_hdr->i1_slice_type == ISLICE) { cabac_init_idc = 0; } else if(ps_slice_hdr->i1_slice_type == PSLICE) { cabac_init_idc = ps_slice_hdr->i1_cabac_init_flag ? 2 : 1; } else { cabac_init_idc = ps_slice_hdr->i1_cabac_init_flag ? 1 : 2; } slice_qp = ps_slice_hdr->i1_slice_qp_delta + ps_pps->i1_pic_init_qp; slice_qp = CLIP3(slice_qp, 0, 51); /*Update QP value for every indepndent slice or for every dependent slice that begins at the start of a new tile*/ if((0 == ps_slice_hdr->i1_dependent_slice_flag) || ((1 == ps_slice_hdr->i1_dependent_slice_flag) && ((0 == ps_codec->s_parse.i4_ctb_tile_x) && (0 == ps_codec->s_parse.i4_ctb_tile_y)))) { ps_codec->s_parse.u4_qp = slice_qp; } /*Cabac init at the beginning of a slice*/ if((1 == ps_slice_hdr->i1_dependent_slice_flag) && (!((ps_codec->s_parse.i4_ctb_tile_x == 0) && (ps_codec->s_parse.i4_ctb_tile_y == 0)))) { if((0 == ps_pps->i1_entropy_coding_sync_enabled_flag) || (ps_pps->i1_entropy_coding_sync_enabled_flag && (0 != ps_codec->s_parse.i4_ctb_x))) { ihevcd_cabac_reset(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm); } } else if((0 == ps_pps->i1_entropy_coding_sync_enabled_flag) || (ps_pps->i1_entropy_coding_sync_enabled_flag && (0 != ps_codec->s_parse.i4_ctb_x))) { ret = ihevcd_cabac_init(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm, slice_qp, cabac_init_idc, &gau1_ihevc_cab_ctxts[cabac_init_idc][slice_qp][0]); if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) { ps_codec->i4_slice_error = 1; end_of_slice_flag = 1; ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; } } do { { WORD32 cur_ctb_idx = ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * (ps_sps->i2_pic_wd_in_ctb); if(1 == ps_codec->i4_num_cores && 0 == cur_ctb_idx % RESET_TU_BUF_NCTB) { ps_codec->s_parse.ps_tu = ps_codec->s_parse.ps_pic_tu; ps_codec->s_parse.i4_pic_tu_idx = 0; } } end_of_pic = 0; /* Section:7.3.7 Coding tree unit syntax */ /* coding_tree_unit() inlined here */ /* If number of cores is greater than 1, then add job to the queue */ /* At the start of ctb row parsing in a tile, queue a job for processing the current tile row */ ps_codec->s_parse.i4_ctb_num_pcm_blks = 0; /*At the beginning of each tile-which is not the beginning of a slice, cabac context must be initialized. 
* Hence, check for the tile beginning here */ if(((0 == ps_codec->s_parse.i4_ctb_tile_x) && (0 == ps_codec->s_parse.i4_ctb_tile_y)) && (!((ps_tile->u1_pos_x == 0) && (ps_tile->u1_pos_y == 0))) && (!((0 == ps_codec->s_parse.i4_ctb_slice_x) && (0 == ps_codec->s_parse.i4_ctb_slice_y)))) { slice_qp = ps_slice_hdr->i1_slice_qp_delta + ps_pps->i1_pic_init_qp; slice_qp = CLIP3(slice_qp, 0, 51); ps_codec->s_parse.u4_qp = slice_qp; ihevcd_get_tile_pos(ps_pps, ps_sps, ps_codec->s_parse.i4_ctb_x, ps_codec->s_parse.i4_ctb_y, &ps_codec->s_parse.i4_ctb_tile_x, &ps_codec->s_parse.i4_ctb_tile_y, &tile_idx); ps_codec->s_parse.ps_tile = ps_pps->ps_tile + tile_idx; ps_codec->s_parse.i4_cur_tile_idx = tile_idx; ps_tile_prev = ps_tile - 1; tile_start_ctb_idx = ps_tile->u1_pos_x + ps_tile->u1_pos_y * (ps_sps->i2_pic_wd_in_ctb); slice_start_ctb_idx = ps_slice_hdr->i2_ctb_x + ps_slice_hdr->i2_ctb_y * (ps_sps->i2_pic_wd_in_ctb); /*For slices that span across multiple tiles*/ if(slice_start_ctb_idx < tile_start_ctb_idx) { /* 2 Cases * 1 - slice spans across frame-width- but does not start from 1st column * 2 - Slice spans across multiple tiles anywhere is a frame */ ps_codec->s_parse.i4_ctb_slice_y = ps_tile->u1_pos_y - ps_slice_hdr->i2_ctb_y; if(!(((ps_slice_hdr->i2_ctb_x + ps_tile_prev->u2_wd) % ps_sps->i2_pic_wd_in_ctb) == ps_tile->u1_pos_x)) //Case 2 { if(ps_slice_hdr->i2_ctb_y <= ps_tile->u1_pos_y) { if(ps_slice_hdr->i2_ctb_x > ps_tile->u1_pos_x) { ps_codec->s_parse.i4_ctb_slice_y -= 1; } } } /*ps_codec->s_parse.i4_ctb_slice_y = ps_tile->u1_pos_y - ps_slice_hdr->i2_ctb_y; if (ps_slice_hdr->i2_ctb_y <= ps_tile->u1_pos_y) { if (ps_slice_hdr->i2_ctb_x > ps_tile->u1_pos_x ) { ps_codec->s_parse.i4_ctb_slice_y -= 1 ; } }*/ } if(!ps_slice_hdr->i1_dependent_slice_flag) { ret = ihevcd_cabac_init(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm, slice_qp, cabac_init_idc, &gau1_ihevc_cab_ctxts[cabac_init_idc][slice_qp][0]); if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) { ps_codec->i4_slice_error = 1; end_of_slice_flag = 1; ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; } } } /* If number of cores is greater than 1, then add job to the queue */ /* At the start of ctb row parsing in a tile, queue a job for processing the current tile row */ if(0 == ps_codec->s_parse.i4_ctb_tile_x) { if(1 < ps_codec->i4_num_cores) { proc_job_t s_job; IHEVCD_ERROR_T ret; s_job.i4_cmd = CMD_PROCESS; s_job.i2_ctb_cnt = (WORD16)ps_tile->u2_wd; s_job.i2_ctb_x = (WORD16)ps_codec->s_parse.i4_ctb_x; s_job.i2_ctb_y = (WORD16)ps_codec->s_parse.i4_ctb_y; s_job.i2_slice_idx = (WORD16)ps_codec->s_parse.i4_cur_slice_idx; s_job.i4_tu_coeff_data_ofst = (UWORD8 *)ps_codec->s_parse.pv_tu_coeff_data - (UWORD8 *)ps_codec->s_parse.pv_pic_tu_coeff_data; ret = ihevcd_jobq_queue((jobq_t *)ps_codec->s_parse.pv_proc_jobq, &s_job, sizeof(proc_job_t), 1); if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) return ret; } else { process_ctxt_t *ps_proc = &ps_codec->as_process[0]; WORD32 tu_coeff_data_ofst = (UWORD8 *)ps_codec->s_parse.pv_tu_coeff_data - (UWORD8 *)ps_codec->s_parse.pv_pic_tu_coeff_data; /* If the codec is running in single core mode, * initialize zeroth process context * TODO: Dual core mode might need a different implementation instead of jobq */ ps_proc->i4_ctb_cnt = ps_tile->u2_wd; ps_proc->i4_ctb_x = ps_codec->s_parse.i4_ctb_x; ps_proc->i4_ctb_y = ps_codec->s_parse.i4_ctb_y; ps_proc->i4_cur_slice_idx = ps_codec->s_parse.i4_cur_slice_idx; ihevcd_init_proc_ctxt(ps_proc, tu_coeff_data_ofst); } } /* Restore cabac context model from top right CTB if entropy sync is 
enabled */ if(ps_pps->i1_entropy_coding_sync_enabled_flag) { /*TODO Handle single CTB and top-right belonging to a different slice */ if(0 == ps_codec->s_parse.i4_ctb_x) { WORD32 default_ctxt = 0; if((0 == ps_codec->s_parse.i4_ctb_slice_y) && (!ps_slice_hdr->i1_dependent_slice_flag)) default_ctxt = 1; if(1 == ps_sps->i2_pic_wd_in_ctb) default_ctxt = 1; ps_codec->s_parse.u4_qp = slice_qp; if(default_ctxt) { ret = ihevcd_cabac_init(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm, slice_qp, cabac_init_idc, &gau1_ihevc_cab_ctxts[cabac_init_idc][slice_qp][0]); if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) { ps_codec->i4_slice_error = 1; end_of_slice_flag = 1; ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; } } else { ret = ihevcd_cabac_init(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm, slice_qp, cabac_init_idc, (const UWORD8 *)&ps_codec->s_parse.s_cabac.au1_ctxt_models_sync); if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) { ps_codec->i4_slice_error = 1; end_of_slice_flag = 1; ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; } } } } if(0 == ps_codec->i4_slice_error) { if(ps_slice_hdr->i1_slice_sao_luma_flag || ps_slice_hdr->i1_slice_sao_chroma_flag) ihevcd_parse_sao(ps_codec); } else { sao_t *ps_sao = ps_codec->s_parse.ps_pic_sao + ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * ps_sps->i2_pic_wd_in_ctb; /* Default values */ ps_sao->b3_y_type_idx = 0; ps_sao->b3_cb_type_idx = 0; ps_sao->b3_cr_type_idx = 0; } { WORD32 ctb_indx; ctb_indx = ps_codec->s_parse.i4_ctb_x + ps_sps->i2_pic_wd_in_ctb * ps_codec->s_parse.i4_ctb_y; ps_codec->s_parse.s_bs_ctxt.pu1_pic_qp_const_in_ctb[ctb_indx >> 3] |= (1 << (ctb_indx & 7)); { UWORD16 *pu1_slice_idx = ps_codec->s_parse.pu1_slice_idx; pu1_slice_idx[ctb_indx] = ps_codec->s_parse.i4_cur_independent_slice_idx; } } if(0 == ps_codec->i4_slice_error) { tu_t *ps_tu = ps_codec->s_parse.ps_tu; WORD32 i4_tu_cnt = ps_codec->s_parse.s_cu.i4_tu_cnt; WORD32 i4_pic_tu_idx = ps_codec->s_parse.i4_pic_tu_idx; pu_t *ps_pu = ps_codec->s_parse.ps_pu; WORD32 i4_pic_pu_idx = ps_codec->s_parse.i4_pic_pu_idx; UWORD8 *pu1_tu_coeff_data = (UWORD8 *)ps_codec->s_parse.pv_tu_coeff_data; ret = ihevcd_parse_coding_quadtree(ps_codec, (ps_codec->s_parse.i4_ctb_x << ps_sps->i1_log2_ctb_size), (ps_codec->s_parse.i4_ctb_y << ps_sps->i1_log2_ctb_size), ps_sps->i1_log2_ctb_size, 0); /* Check for error */ if (ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) { /* Reset tu and pu parameters, and signal current ctb as skip */ WORD32 pu_skip_wd, pu_skip_ht; WORD32 rows_remaining, cols_remaining; WORD32 tu_coeff_data_reset_size; /* Set pu wd and ht based on whether the ctb is complete or not */ rows_remaining = ps_sps->i2_pic_height_in_luma_samples - (ps_codec->s_parse.i4_ctb_y << ps_sps->i1_log2_ctb_size); pu_skip_ht = MIN(ctb_size, rows_remaining); cols_remaining = ps_sps->i2_pic_width_in_luma_samples - (ps_codec->s_parse.i4_ctb_x << ps_sps->i1_log2_ctb_size); pu_skip_wd = MIN(ctb_size, cols_remaining); ps_codec->s_parse.ps_tu = ps_tu; ps_codec->s_parse.s_cu.i4_tu_cnt = i4_tu_cnt; ps_codec->s_parse.i4_pic_tu_idx = i4_pic_tu_idx; ps_codec->s_parse.ps_pu = ps_pu; ps_codec->s_parse.i4_pic_pu_idx = i4_pic_pu_idx; ps_tu->b1_cb_cbf = 0; ps_tu->b1_cr_cbf = 0; ps_tu->b1_y_cbf = 0; ps_tu->b4_pos_x = 0; ps_tu->b4_pos_y = 0; ps_tu->b1_transquant_bypass = 0; ps_tu->b3_size = (ps_sps->i1_log2_ctb_size - 2); ps_tu->b7_qp = ps_codec->s_parse.u4_qp; ps_tu->b3_chroma_intra_mode_idx = INTRA_PRED_CHROMA_IDX_NONE; ps_tu->b6_luma_intra_mode = INTRA_PRED_NONE; ps_tu->b1_first_tu_in_cu = 1; tu_coeff_data_reset_size = (UWORD8 
*)ps_codec->s_parse.pv_tu_coeff_data - pu1_tu_coeff_data; memset(pu1_tu_coeff_data, 0, tu_coeff_data_reset_size); ps_codec->s_parse.pv_tu_coeff_data = (void *)pu1_tu_coeff_data; ps_codec->s_parse.ps_tu++; ps_codec->s_parse.s_cu.i4_tu_cnt++; ps_codec->s_parse.i4_pic_tu_idx++; ps_codec->s_parse.s_cu.i4_pred_mode = PRED_MODE_SKIP; ps_codec->s_parse.s_cu.i4_part_mode = PART_2Nx2N; ps_pu->b2_part_idx = 0; ps_pu->b4_pos_x = 0; ps_pu->b4_pos_y = 0; ps_pu->b4_wd = (pu_skip_wd >> 2) - 1; ps_pu->b4_ht = (pu_skip_ht >> 2) - 1; ps_pu->b1_intra_flag = 0; ps_pu->b3_part_mode = ps_codec->s_parse.s_cu.i4_part_mode; ps_pu->b1_merge_flag = 1; ps_pu->b3_merge_idx = 0; ps_codec->s_parse.ps_pu++; ps_codec->s_parse.i4_pic_pu_idx++; /* Set slice error to suppress further parsing and * signal end of slice. */ ps_codec->i4_slice_error = 1; end_of_slice_flag = 1; ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; } } else { tu_t *ps_tu = ps_codec->s_parse.ps_tu; pu_t *ps_pu = ps_codec->s_parse.ps_pu; WORD32 pu_skip_wd, pu_skip_ht; WORD32 rows_remaining, cols_remaining; /* Set pu wd and ht based on whether the ctb is complete or not */ rows_remaining = ps_sps->i2_pic_height_in_luma_samples - (ps_codec->s_parse.i4_ctb_y << ps_sps->i1_log2_ctb_size); pu_skip_ht = MIN(ctb_size, rows_remaining); cols_remaining = ps_sps->i2_pic_width_in_luma_samples - (ps_codec->s_parse.i4_ctb_x << ps_sps->i1_log2_ctb_size); pu_skip_wd = MIN(ctb_size, cols_remaining); ps_tu->b1_cb_cbf = 0; ps_tu->b1_cr_cbf = 0; ps_tu->b1_y_cbf = 0; ps_tu->b4_pos_x = 0; ps_tu->b4_pos_y = 0; ps_tu->b1_transquant_bypass = 0; ps_tu->b3_size = (ps_sps->i1_log2_ctb_size - 2); ps_tu->b7_qp = ps_codec->s_parse.u4_qp; ps_tu->b3_chroma_intra_mode_idx = INTRA_PRED_CHROMA_IDX_NONE; ps_tu->b6_luma_intra_mode = INTRA_PRED_NONE; ps_tu->b1_first_tu_in_cu = 1; ps_codec->s_parse.ps_tu++; ps_codec->s_parse.s_cu.i4_tu_cnt++; ps_codec->s_parse.i4_pic_tu_idx++; ps_codec->s_parse.s_cu.i4_pred_mode = PRED_MODE_SKIP; ps_codec->s_parse.s_cu.i4_part_mode = PART_2Nx2N; ps_pu->b2_part_idx = 0; ps_pu->b4_pos_x = 0; ps_pu->b4_pos_y = 0; ps_pu->b4_wd = (pu_skip_wd >> 2) - 1; ps_pu->b4_ht = (pu_skip_ht >> 2) - 1; ps_pu->b1_intra_flag = 0; ps_pu->b3_part_mode = ps_codec->s_parse.s_cu.i4_part_mode; ps_pu->b1_merge_flag = 1; ps_pu->b3_merge_idx = 0; ps_codec->s_parse.ps_pu++; ps_codec->s_parse.i4_pic_pu_idx++; } if(0 == ps_codec->i4_slice_error) end_of_slice_flag = ihevcd_cabac_decode_terminate(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm); AEV_TRACE("end_of_slice_flag", end_of_slice_flag, ps_codec->s_parse.s_cabac.u4_range); /* In case of tiles or entropy sync, terminate cabac and copy cabac context backed up at the end of top-right CTB */ if(ps_pps->i1_tiles_enabled_flag || ps_pps->i1_entropy_coding_sync_enabled_flag) { WORD32 end_of_tile = 0; WORD32 end_of_tile_row = 0; /* Take a back up of cabac context models if entropy sync is enabled */ if(ps_pps->i1_entropy_coding_sync_enabled_flag || ps_pps->i1_tiles_enabled_flag) { if(1 == ps_codec->s_parse.i4_ctb_x) { WORD32 size = sizeof(ps_codec->s_parse.s_cabac.au1_ctxt_models); memcpy(&ps_codec->s_parse.s_cabac.au1_ctxt_models_sync, &ps_codec->s_parse.s_cabac.au1_ctxt_models, size); } } /* Since tiles and entropy sync are not enabled simultaneously, the following will not result in any problems */ if((ps_codec->s_parse.i4_ctb_tile_x + 1) == (ps_tile->u2_wd)) { end_of_tile_row = 1; if((ps_codec->s_parse.i4_ctb_tile_y + 1) == ps_tile->u2_ht) end_of_tile = 1; } if((0 == end_of_slice_flag) && ((ps_pps->i1_tiles_enabled_flag && end_of_tile) || 
(ps_pps->i1_entropy_coding_sync_enabled_flag && end_of_tile_row))) { WORD32 end_of_sub_stream_one_bit; end_of_sub_stream_one_bit = ihevcd_cabac_decode_terminate(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm); AEV_TRACE("end_of_sub_stream_one_bit", end_of_sub_stream_one_bit, ps_codec->s_parse.s_cabac.u4_range); /* TODO: Remove the check for offset when HM is updated to include a byte unconditionally even for aligned location */ /* For Ittiam streams this check should not be there, for HM9.1 streams this should be there */ if(ps_codec->s_parse.s_bitstrm.u4_bit_ofst % 8) ihevcd_bits_flush_to_byte_boundary(&ps_codec->s_parse.s_bitstrm); UNUSED(end_of_sub_stream_one_bit); } } { WORD32 ctb_indx; ctb_addr = ps_codec->s_parse.i4_ctb_y * num_ctb_in_row + ps_codec->s_parse.i4_ctb_x; ctb_indx = ++ctb_addr; /* Store pu_idx for next CTB in frame level pu_idx array */ if((ps_tile->u2_wd == (ps_codec->s_parse.i4_ctb_tile_x + 1)) && (ps_tile->u2_wd != ps_sps->i2_pic_wd_in_ctb)) { ctb_indx = (ps_sps->i2_pic_wd_in_ctb * (ps_codec->s_parse.i4_ctb_tile_y + 1 + ps_tile->u1_pos_y)) + ps_tile->u1_pos_x; //idx is the beginning of next row in current tile. if(ps_tile->u2_ht == (ps_codec->s_parse.i4_ctb_tile_y + 1)) { if((ps_tile->u2_wd + ps_tile->u1_pos_x == ps_sps->i2_pic_wd_in_ctb) && ((ps_tile->u2_ht + ps_tile->u1_pos_y == ps_sps->i2_pic_ht_in_ctb))) { ctb_indx = ctb_addr; //Next continuous ctb address } else //Not last tile's end , but a tile end { tile_t *ps_next_tile = ps_codec->s_parse.ps_tile + 1; ctb_indx = ps_next_tile->u1_pos_x + (ps_next_tile->u1_pos_y * ps_sps->i2_pic_wd_in_ctb); //idx is the beginning of first row in next tile. } } } ps_codec->s_parse.pu4_pic_pu_idx[ctb_indx] = ps_codec->s_parse.i4_pic_pu_idx; ps_codec->s_parse.i4_next_pu_ctb_cnt = ctb_indx; ps_codec->s_parse.pu1_pu_map += num_min4x4_in_ctb; /* Store tu_idx for next CTB in frame level tu_idx array */ if(1 == ps_codec->i4_num_cores) { ctb_indx = (0 == ctb_addr % RESET_TU_BUF_NCTB) ? RESET_TU_BUF_NCTB : ctb_addr % RESET_TU_BUF_NCTB; if((ps_tile->u2_wd == (ps_codec->s_parse.i4_ctb_tile_x + 1)) && (ps_tile->u2_wd != ps_sps->i2_pic_wd_in_ctb)) { ctb_indx = (ps_sps->i2_pic_wd_in_ctb * (ps_codec->s_parse.i4_ctb_tile_y + 1 + ps_tile->u1_pos_y)) + ps_tile->u1_pos_x; //idx is the beginning of next row in current tile. if(ps_tile->u2_ht == (ps_codec->s_parse.i4_ctb_tile_y + 1)) { if((ps_tile->u2_wd + ps_tile->u1_pos_x == ps_sps->i2_pic_wd_in_ctb) && ((ps_tile->u2_ht + ps_tile->u1_pos_y == ps_sps->i2_pic_ht_in_ctb))) { ctb_indx = (0 == ctb_addr % RESET_TU_BUF_NCTB) ? RESET_TU_BUF_NCTB : ctb_addr % RESET_TU_BUF_NCTB; } else //Not last tile's end , but a tile end { tile_t *ps_next_tile = ps_codec->s_parse.ps_tile + 1; ctb_indx = ps_next_tile->u1_pos_x + (ps_next_tile->u1_pos_y * ps_sps->i2_pic_wd_in_ctb); //idx is the beginning of first row in next tile. } } } ps_codec->s_parse.i4_next_tu_ctb_cnt = ctb_indx; ps_codec->s_parse.pu4_pic_tu_idx[ctb_indx] = ps_codec->s_parse.i4_pic_tu_idx; } else { ctb_indx = ctb_addr; if((ps_tile->u2_wd == (ps_codec->s_parse.i4_ctb_tile_x + 1)) && (ps_tile->u2_wd != ps_sps->i2_pic_wd_in_ctb)) { ctb_indx = (ps_sps->i2_pic_wd_in_ctb * (ps_codec->s_parse.i4_ctb_tile_y + 1 + ps_tile->u1_pos_y)) + ps_tile->u1_pos_x; //idx is the beginning of next row in current tile. 
if(ps_tile->u2_ht == (ps_codec->s_parse.i4_ctb_tile_y + 1)) { if((ps_tile->u2_wd + ps_tile->u1_pos_x == ps_sps->i2_pic_wd_in_ctb) && ((ps_tile->u2_ht + ps_tile->u1_pos_y == ps_sps->i2_pic_ht_in_ctb))) { ctb_indx = ctb_addr; } else //Not last tile's end , but a tile end { tile_t *ps_next_tile = ps_codec->s_parse.ps_tile + 1; ctb_indx = ps_next_tile->u1_pos_x + (ps_next_tile->u1_pos_y * ps_sps->i2_pic_wd_in_ctb); //idx is the beginning of first row in next tile. } } } ps_codec->s_parse.i4_next_tu_ctb_cnt = ctb_indx; ps_codec->s_parse.pu4_pic_tu_idx[ctb_indx] = ps_codec->s_parse.i4_pic_tu_idx; } ps_codec->s_parse.pu1_tu_map += num_min4x4_in_ctb; } if(ps_codec->i4_num_cores <= MV_PRED_NUM_CORES_THRESHOLD) { /*************************************************/ /**************** MV pred **********************/ /*************************************************/ WORD8 u1_top_ctb_avail = 1; WORD8 u1_left_ctb_avail = 1; WORD8 u1_top_lt_ctb_avail = 1; WORD8 u1_top_rt_ctb_avail = 1; WORD16 i2_wd_in_ctb; tile_start_ctb_idx = ps_tile->u1_pos_x + ps_tile->u1_pos_y * (ps_sps->i2_pic_wd_in_ctb); slice_start_ctb_idx = ps_slice_hdr->i2_ctb_x + ps_slice_hdr->i2_ctb_y * (ps_sps->i2_pic_wd_in_ctb); if((slice_start_ctb_idx < tile_start_ctb_idx)) { i2_wd_in_ctb = ps_sps->i2_pic_wd_in_ctb; } else { i2_wd_in_ctb = ps_tile->u2_wd; } /* slice and tile boundaries */ if((0 == ps_codec->s_parse.i4_ctb_y) || (0 == ps_codec->s_parse.i4_ctb_tile_y)) { u1_top_ctb_avail = 0; u1_top_lt_ctb_avail = 0; u1_top_rt_ctb_avail = 0; } if((0 == ps_codec->s_parse.i4_ctb_x) || (0 == ps_codec->s_parse.i4_ctb_tile_x)) { u1_left_ctb_avail = 0; u1_top_lt_ctb_avail = 0; if((0 == ps_codec->s_parse.i4_ctb_slice_y) || (0 == ps_codec->s_parse.i4_ctb_tile_y)) { u1_top_ctb_avail = 0; if((i2_wd_in_ctb - 1) != ps_codec->s_parse.i4_ctb_slice_x) //TODO: For tile, not implemented { u1_top_rt_ctb_avail = 0; } } } /*For slices not beginning at start of a ctb row*/ else if(ps_codec->s_parse.i4_ctb_x > 0) { if((0 == ps_codec->s_parse.i4_ctb_slice_y) || (0 == ps_codec->s_parse.i4_ctb_tile_y)) { u1_top_ctb_avail = 0; u1_top_lt_ctb_avail = 0; if(0 == ps_codec->s_parse.i4_ctb_slice_x) { u1_left_ctb_avail = 0; } if((i2_wd_in_ctb - 1) != ps_codec->s_parse.i4_ctb_slice_x) { u1_top_rt_ctb_avail = 0; } } else if((1 == ps_codec->s_parse.i4_ctb_slice_y) && (0 == ps_codec->s_parse.i4_ctb_slice_x)) { u1_top_lt_ctb_avail = 0; } } if(((ps_sps->i2_pic_wd_in_ctb - 1) == ps_codec->s_parse.i4_ctb_x) || ((ps_tile->u2_wd - 1) == ps_codec->s_parse.i4_ctb_tile_x)) { u1_top_rt_ctb_avail = 0; } if(PSLICE == ps_slice_hdr->i1_slice_type || BSLICE == ps_slice_hdr->i1_slice_type) { mv_ctxt_t s_mv_ctxt; process_ctxt_t *ps_proc; UWORD32 *pu4_ctb_top_pu_idx; UWORD32 *pu4_ctb_left_pu_idx; UWORD32 *pu4_ctb_top_left_pu_idx; WORD32 i4_ctb_pu_cnt; WORD32 cur_ctb_idx; WORD32 next_ctb_idx; WORD32 cur_pu_idx; ps_proc = &ps_codec->as_process[(ps_codec->i4_num_cores == 1) ? 
1 : (ps_codec->i4_num_cores - 1)]; cur_ctb_idx = ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * (ps_sps->i2_pic_wd_in_ctb); next_ctb_idx = ps_codec->s_parse.i4_next_pu_ctb_cnt; i4_ctb_pu_cnt = ps_codec->s_parse.pu4_pic_pu_idx[next_ctb_idx] - ps_codec->s_parse.pu4_pic_pu_idx[cur_ctb_idx]; cur_pu_idx = ps_codec->s_parse.pu4_pic_pu_idx[cur_ctb_idx]; pu4_ctb_top_pu_idx = ps_proc->pu4_pic_pu_idx_top + (ps_codec->s_parse.i4_ctb_x * ctb_size / MIN_PU_SIZE); pu4_ctb_left_pu_idx = ps_proc->pu4_pic_pu_idx_left; pu4_ctb_top_left_pu_idx = &ps_proc->u4_ctb_top_left_pu_idx; /* Initializing s_mv_ctxt */ { s_mv_ctxt.ps_pps = ps_pps; s_mv_ctxt.ps_sps = ps_sps; s_mv_ctxt.ps_slice_hdr = ps_slice_hdr; s_mv_ctxt.i4_ctb_x = ps_codec->s_parse.i4_ctb_x; s_mv_ctxt.i4_ctb_y = ps_codec->s_parse.i4_ctb_y; s_mv_ctxt.ps_pu = &ps_codec->s_parse.ps_pic_pu[cur_pu_idx]; s_mv_ctxt.ps_pic_pu = ps_codec->s_parse.ps_pic_pu; s_mv_ctxt.ps_tile = ps_tile; s_mv_ctxt.pu4_pic_pu_idx_map = ps_proc->pu4_pic_pu_idx_map; s_mv_ctxt.pu4_pic_pu_idx = ps_codec->s_parse.pu4_pic_pu_idx; s_mv_ctxt.pu1_pic_pu_map = ps_codec->s_parse.pu1_pic_pu_map; s_mv_ctxt.i4_ctb_pu_cnt = i4_ctb_pu_cnt; s_mv_ctxt.i4_ctb_start_pu_idx = cur_pu_idx; s_mv_ctxt.u1_top_ctb_avail = u1_top_ctb_avail; s_mv_ctxt.u1_top_rt_ctb_avail = u1_top_rt_ctb_avail; s_mv_ctxt.u1_top_lt_ctb_avail = u1_top_lt_ctb_avail; s_mv_ctxt.u1_left_ctb_avail = u1_left_ctb_avail; } ihevcd_get_mv_ctb(&s_mv_ctxt, pu4_ctb_top_pu_idx, pu4_ctb_left_pu_idx, pu4_ctb_top_left_pu_idx); } else { WORD32 num_minpu_in_ctb = (ctb_size / MIN_PU_SIZE) * (ctb_size / MIN_PU_SIZE); UWORD8 *pu1_pic_pu_map_ctb = ps_codec->s_parse.pu1_pic_pu_map + (ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * ps_sps->i2_pic_wd_in_ctb) * num_minpu_in_ctb; process_ctxt_t *ps_proc = &ps_codec->as_process[(ps_codec->i4_num_cores == 1) ? 1 : (ps_codec->i4_num_cores - 1)]; WORD32 row, col; WORD32 pu_cnt; WORD32 num_pu_per_ctb; WORD32 cur_ctb_idx; WORD32 next_ctb_idx; WORD32 ctb_start_pu_idx; UWORD32 *pu4_nbr_pu_idx = ps_proc->pu4_pic_pu_idx_map; WORD32 nbr_pu_idx_strd = MAX_CTB_SIZE / MIN_PU_SIZE + 2; pu_t *ps_pu; for(row = 0; row < ctb_size / MIN_PU_SIZE; row++) { for(col = 0; col < ctb_size / MIN_PU_SIZE; col++) { pu1_pic_pu_map_ctb[row * ctb_size / MIN_PU_SIZE + col] = 0; } } /* Neighbor PU idx update inside CTB */ /* 1byte per 4x4. 
Indicates the PU idx that 4x4 block belongs to */ cur_ctb_idx = ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * (ps_sps->i2_pic_wd_in_ctb); next_ctb_idx = ps_codec->s_parse.i4_next_pu_ctb_cnt; num_pu_per_ctb = ps_codec->s_parse.pu4_pic_pu_idx[next_ctb_idx] - ps_codec->s_parse.pu4_pic_pu_idx[cur_ctb_idx]; ctb_start_pu_idx = ps_codec->s_parse.pu4_pic_pu_idx[cur_ctb_idx]; ps_pu = &ps_codec->s_parse.ps_pic_pu[ctb_start_pu_idx]; for(pu_cnt = 0; pu_cnt < num_pu_per_ctb; pu_cnt++, ps_pu++) { UWORD32 cur_pu_idx; WORD32 pu_ht = (ps_pu->b4_ht + 1) << 2; WORD32 pu_wd = (ps_pu->b4_wd + 1) << 2; cur_pu_idx = ctb_start_pu_idx + pu_cnt; for(row = 0; row < pu_ht / MIN_PU_SIZE; row++) for(col = 0; col < pu_wd / MIN_PU_SIZE; col++) pu4_nbr_pu_idx[(1 + ps_pu->b4_pos_x + col) + (1 + ps_pu->b4_pos_y + row) * nbr_pu_idx_strd] = cur_pu_idx; } /* Updating Top and Left pointers */ { WORD32 rows_remaining = ps_sps->i2_pic_height_in_luma_samples - (ps_codec->s_parse.i4_ctb_y << ps_sps->i1_log2_ctb_size); WORD32 ctb_size_left = MIN(ctb_size, rows_remaining); /* Top Left */ /* saving top left before updating top ptr, as updating top ptr will overwrite the top left for the next ctb */ ps_proc->u4_ctb_top_left_pu_idx = ps_proc->pu4_pic_pu_idx_top[(ps_codec->s_parse.i4_ctb_x * ctb_size / MIN_PU_SIZE) + ctb_size / MIN_PU_SIZE - 1]; for(i = 0; i < ctb_size / MIN_PU_SIZE; i++) { /* Left */ /* Last column of au4_nbr_pu_idx */ ps_proc->pu4_pic_pu_idx_left[i] = pu4_nbr_pu_idx[(ctb_size / MIN_PU_SIZE) + (i + 1) * nbr_pu_idx_strd]; /* Top */ /* Last row of au4_nbr_pu_idx */ ps_proc->pu4_pic_pu_idx_top[(ps_codec->s_parse.i4_ctb_x * ctb_size / MIN_PU_SIZE) + i] = pu4_nbr_pu_idx[(ctb_size_left / MIN_PU_SIZE) * nbr_pu_idx_strd + i + 1]; } } } /*************************************************/ /****************** BS, QP *********************/ /*************************************************/ /* Check if deblock is disabled for the current slice or if it is disabled for the current picture * because of disable deblock api */ if(0 == ps_codec->i4_disable_deblk_pic) { if((0 == ps_slice_hdr->i1_slice_disable_deblocking_filter_flag) && (0 == ps_codec->i4_slice_error)) { WORD32 i4_ctb_tu_cnt; WORD32 cur_ctb_idx, next_ctb_idx; WORD32 cur_pu_idx; WORD32 cur_tu_idx; process_ctxt_t *ps_proc; ps_proc = &ps_codec->as_process[(ps_codec->i4_num_cores == 1) ? 
1 : (ps_codec->i4_num_cores - 1)]; cur_ctb_idx = ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * (ps_sps->i2_pic_wd_in_ctb); cur_pu_idx = ps_codec->s_parse.pu4_pic_pu_idx[cur_ctb_idx]; next_ctb_idx = ps_codec->s_parse.i4_next_tu_ctb_cnt; if(1 == ps_codec->i4_num_cores) { i4_ctb_tu_cnt = ps_codec->s_parse.pu4_pic_tu_idx[next_ctb_idx] - ps_codec->s_parse.pu4_pic_tu_idx[cur_ctb_idx % RESET_TU_BUF_NCTB]; cur_tu_idx = ps_codec->s_parse.pu4_pic_tu_idx[cur_ctb_idx % RESET_TU_BUF_NCTB]; } else { i4_ctb_tu_cnt = ps_codec->s_parse.pu4_pic_tu_idx[next_ctb_idx] - ps_codec->s_parse.pu4_pic_tu_idx[cur_ctb_idx]; cur_tu_idx = ps_codec->s_parse.pu4_pic_tu_idx[cur_ctb_idx]; } ps_codec->s_parse.s_bs_ctxt.ps_pps = ps_codec->s_parse.ps_pps; ps_codec->s_parse.s_bs_ctxt.ps_sps = ps_codec->s_parse.ps_sps; ps_codec->s_parse.s_bs_ctxt.ps_codec = ps_codec; ps_codec->s_parse.s_bs_ctxt.i4_ctb_tu_cnt = i4_ctb_tu_cnt; ps_codec->s_parse.s_bs_ctxt.i4_ctb_x = ps_codec->s_parse.i4_ctb_x; ps_codec->s_parse.s_bs_ctxt.i4_ctb_y = ps_codec->s_parse.i4_ctb_y; ps_codec->s_parse.s_bs_ctxt.i4_ctb_tile_x = ps_codec->s_parse.i4_ctb_tile_x; ps_codec->s_parse.s_bs_ctxt.i4_ctb_tile_y = ps_codec->s_parse.i4_ctb_tile_y; ps_codec->s_parse.s_bs_ctxt.i4_ctb_slice_x = ps_codec->s_parse.i4_ctb_slice_x; ps_codec->s_parse.s_bs_ctxt.i4_ctb_slice_y = ps_codec->s_parse.i4_ctb_slice_y; ps_codec->s_parse.s_bs_ctxt.ps_tu = &ps_codec->s_parse.ps_pic_tu[cur_tu_idx]; ps_codec->s_parse.s_bs_ctxt.ps_pu = &ps_codec->s_parse.ps_pic_pu[cur_pu_idx]; ps_codec->s_parse.s_bs_ctxt.pu4_pic_pu_idx_map = ps_proc->pu4_pic_pu_idx_map; ps_codec->s_parse.s_bs_ctxt.i4_next_pu_ctb_cnt = ps_codec->s_parse.i4_next_pu_ctb_cnt; ps_codec->s_parse.s_bs_ctxt.i4_next_tu_ctb_cnt = ps_codec->s_parse.i4_next_tu_ctb_cnt; ps_codec->s_parse.s_bs_ctxt.pu1_slice_idx = ps_codec->s_parse.pu1_slice_idx; ps_codec->s_parse.s_bs_ctxt.ps_slice_hdr = ps_codec->s_parse.ps_slice_hdr; ps_codec->s_parse.s_bs_ctxt.ps_tile = ps_codec->s_parse.ps_tile; if(ISLICE == ps_slice_hdr->i1_slice_type) { ihevcd_ctb_boundary_strength_islice(&ps_codec->s_parse.s_bs_ctxt); } else { ihevcd_ctb_boundary_strength_pbslice(&ps_codec->s_parse.s_bs_ctxt); } } else { WORD32 bs_strd = (ps_sps->i2_pic_wd_in_ctb + 1) * (ctb_size * ctb_size / 8 / 16); UWORD32 *pu4_vert_bs = (UWORD32 *)((UWORD8 *)ps_codec->s_parse.s_bs_ctxt.pu4_pic_vert_bs + ps_codec->s_parse.i4_ctb_x * (ctb_size * ctb_size / 8 / 16) + ps_codec->s_parse.i4_ctb_y * bs_strd); UWORD32 *pu4_horz_bs = (UWORD32 *)((UWORD8 *)ps_codec->s_parse.s_bs_ctxt.pu4_pic_horz_bs + ps_codec->s_parse.i4_ctb_x * (ctb_size * ctb_size / 8 / 16) + ps_codec->s_parse.i4_ctb_y * bs_strd); memset(pu4_vert_bs, 0, (ctb_size / 8 + 1) * (ctb_size / 4) / 8 * 2); memset(pu4_horz_bs, 0, (ctb_size / 8) * (ctb_size / 4) / 8 * 2); } } } /* Update the parse status map */ { sps_t *ps_sps = ps_codec->s_parse.ps_sps; UWORD8 *pu1_buf; WORD32 idx; idx = (ps_codec->s_parse.i4_ctb_x); idx += ((ps_codec->s_parse.i4_ctb_y) * ps_sps->i2_pic_wd_in_ctb); pu1_buf = (ps_codec->pu1_parse_map + idx); *pu1_buf = 1; } /* Increment CTB x and y positions */ ps_codec->s_parse.i4_ctb_tile_x++; ps_codec->s_parse.i4_ctb_x++; ps_codec->s_parse.i4_ctb_slice_x++; /*If tiles are enabled, handle the slice counters differently*/ if(ps_pps->i1_tiles_enabled_flag) { tile_start_ctb_idx = ps_tile->u1_pos_x + ps_tile->u1_pos_y * (ps_sps->i2_pic_wd_in_ctb); slice_start_ctb_idx = ps_slice_hdr->i2_ctb_x + ps_slice_hdr->i2_ctb_y * (ps_sps->i2_pic_wd_in_ctb); if((slice_start_ctb_idx < tile_start_ctb_idx)) { 
if(ps_codec->s_parse.i4_ctb_slice_x == (ps_tile->u1_pos_x + ps_tile->u2_wd)) { /* Reached end of slice row within a tile /frame */ ps_codec->s_parse.i4_ctb_slice_y++; ps_codec->s_parse.i4_ctb_slice_x = ps_tile->u1_pos_x; //todo:Check } } else if(ps_codec->s_parse.i4_ctb_slice_x == (ps_tile->u2_wd)) { ps_codec->s_parse.i4_ctb_slice_y++; ps_codec->s_parse.i4_ctb_slice_x = 0; } } else { if(ps_codec->s_parse.i4_ctb_slice_x == ps_tile->u2_wd) { /* Reached end of slice row within a tile /frame */ ps_codec->s_parse.i4_ctb_slice_y++; ps_codec->s_parse.i4_ctb_slice_x = 0; } } if(ps_codec->s_parse.i4_ctb_tile_x == (ps_tile->u2_wd)) { /* Reached end of tile row */ ps_codec->s_parse.i4_ctb_tile_x = 0; ps_codec->s_parse.i4_ctb_x = ps_tile->u1_pos_x; ps_codec->s_parse.i4_ctb_tile_y++; ps_codec->s_parse.i4_ctb_y++; if(ps_codec->s_parse.i4_ctb_tile_y == (ps_tile->u2_ht)) { /* Reached End of Tile */ ps_codec->s_parse.i4_ctb_tile_y = 0; ps_codec->s_parse.i4_ctb_tile_x = 0; ps_codec->s_parse.ps_tile++; if((ps_tile->u2_ht + ps_tile->u1_pos_y == ps_sps->i2_pic_ht_in_ctb) && (ps_tile->u2_wd + ps_tile->u1_pos_x == ps_sps->i2_pic_wd_in_ctb)) { /* Reached end of frame */ end_of_pic = 1; ps_codec->s_parse.i4_ctb_x = 0; ps_codec->s_parse.i4_ctb_y = ps_sps->i2_pic_ht_in_ctb; } else { /* Initialize ctb_x and ctb_y to start of next tile */ ps_tile = ps_codec->s_parse.ps_tile; ps_codec->s_parse.i4_ctb_x = ps_tile->u1_pos_x; ps_codec->s_parse.i4_ctb_y = ps_tile->u1_pos_y; ps_codec->s_parse.i4_ctb_tile_y = 0; ps_codec->s_parse.i4_ctb_tile_x = 0; ps_codec->s_parse.i4_ctb_slice_x = ps_tile->u1_pos_x; ps_codec->s_parse.i4_ctb_slice_y = ps_tile->u1_pos_y; } } } ps_codec->s_parse.i4_next_ctb_indx = ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * ps_sps->i2_pic_wd_in_ctb; /* If the current slice is in error, check if the next slice's address * is reached and mark the end_of_slice flag */ if(ps_codec->i4_slice_error) { slice_header_t *ps_slice_hdr_next = ps_slice_hdr + 1; WORD32 next_slice_addr = ps_slice_hdr_next->i2_ctb_x + ps_slice_hdr_next->i2_ctb_y * ps_sps->i2_pic_wd_in_ctb; if(ps_codec->s_parse.i4_next_ctb_indx == next_slice_addr) end_of_slice_flag = 1; } /* If the codec is running in single core mode * then call process function for current CTB */ if((1 == ps_codec->i4_num_cores) && (ps_codec->s_parse.i4_ctb_tile_x == 0)) { process_ctxt_t *ps_proc = &ps_codec->as_process[0]; ps_proc->i4_ctb_cnt = ps_proc->ps_tile->u2_wd; ihevcd_process(ps_proc); } /* If the bytes for the current slice are exhausted * set end_of_slice flag to 1 * This slice will be treated as incomplete */ if((UWORD8 *)ps_codec->s_parse.s_bitstrm.pu1_buf_max + BITSTRM_OFF_THRS < ((UWORD8 *)ps_codec->s_parse.s_bitstrm.pu4_buf + (ps_codec->s_parse.s_bitstrm.u4_bit_ofst / 8))) { if(0 == ps_codec->i4_slice_error) end_of_slice_flag = 1; } if(end_of_pic) break; } while(!end_of_slice_flag); /* Reset slice error */ ps_codec->i4_slice_error = 0; /* Increment the slice index for parsing next slice */ if(0 == end_of_pic) { while(1) { WORD32 parse_slice_idx; parse_slice_idx = ps_codec->s_parse.i4_cur_slice_idx; parse_slice_idx++; { /* If the next slice header is not initialized, update cur_slice_idx and break */ if((1 == ps_codec->i4_num_cores) || (0 != (parse_slice_idx & (MAX_SLICE_HDR_CNT - 1)))) { ps_codec->s_parse.i4_cur_slice_idx = parse_slice_idx; break; } /* If the next slice header is initialised, wait for the parsed slices to be processed */ else { WORD32 ctb_indx = 0; while(ctb_indx != ps_sps->i4_pic_size_in_ctb) { WORD32 parse_status = 
*(ps_codec->pu1_parse_map + ctb_indx); volatile WORD32 proc_status = *(ps_codec->pu1_proc_map + ctb_indx) & 1; if(parse_status == proc_status) ctb_indx++; } ps_codec->s_parse.i4_cur_slice_idx = parse_slice_idx; break; } } } } else { #if FRAME_ILF_PAD if(FRAME_ILF_PAD && 1 == ps_codec->i4_num_cores) { if(ps_slice_hdr->i4_abs_pic_order_cnt == 0) { DUMP_PRE_ILF(ps_codec->as_process[0].pu1_cur_pic_luma, ps_codec->as_process[0].pu1_cur_pic_chroma, ps_sps->i2_pic_width_in_luma_samples, ps_sps->i2_pic_height_in_luma_samples, ps_codec->i4_strd); DUMP_BS(ps_codec->as_process[0].s_bs_ctxt.pu4_pic_vert_bs, ps_codec->as_process[0].s_bs_ctxt.pu4_pic_horz_bs, ps_sps->i2_pic_wd_in_ctb * (ctb_size * ctb_size / 8 / 16) * ps_sps->i2_pic_ht_in_ctb, (ps_sps->i2_pic_wd_in_ctb + 1) * (ctb_size * ctb_size / 8 / 16) * ps_sps->i2_pic_ht_in_ctb); DUMP_QP(ps_codec->as_process[0].s_bs_ctxt.pu1_pic_qp, (ps_sps->i2_pic_height_in_luma_samples * ps_sps->i2_pic_width_in_luma_samples) / (MIN_CU_SIZE * MIN_CU_SIZE)); DUMP_QP_CONST_IN_CTB(ps_codec->as_process[0].s_bs_ctxt.pu1_pic_qp_const_in_ctb, (ps_sps->i2_pic_height_in_luma_samples * ps_sps->i2_pic_width_in_luma_samples) / (MIN_CTB_SIZE * MIN_CTB_SIZE) / 8); DUMP_NO_LOOP_FILTER(ps_codec->as_process[0].pu1_pic_no_loop_filter_flag, (ps_sps->i2_pic_width_in_luma_samples / MIN_CU_SIZE) * (ps_sps->i2_pic_height_in_luma_samples / MIN_CU_SIZE) / 8); DUMP_OFFSETS(ps_slice_hdr->i1_beta_offset_div2, ps_slice_hdr->i1_tc_offset_div2, ps_pps->i1_pic_cb_qp_offset, ps_pps->i1_pic_cr_qp_offset); } ps_codec->s_parse.s_deblk_ctxt.ps_pps = ps_codec->s_parse.ps_pps; ps_codec->s_parse.s_deblk_ctxt.ps_sps = ps_codec->s_parse.ps_sps; ps_codec->s_parse.s_deblk_ctxt.ps_codec = ps_codec; ps_codec->s_parse.s_deblk_ctxt.ps_slice_hdr = ps_codec->s_parse.ps_slice_hdr; ps_codec->s_parse.s_deblk_ctxt.is_chroma_yuv420sp_vu = (ps_codec->e_ref_chroma_fmt == IV_YUV_420SP_VU); ps_codec->s_parse.s_sao_ctxt.ps_pps = ps_codec->s_parse.ps_pps; ps_codec->s_parse.s_sao_ctxt.ps_sps = ps_codec->s_parse.ps_sps; ps_codec->s_parse.s_sao_ctxt.ps_codec = ps_codec; ps_codec->s_parse.s_sao_ctxt.ps_slice_hdr = ps_codec->s_parse.ps_slice_hdr; ihevcd_ilf_pad_frame(&ps_codec->s_parse.s_deblk_ctxt, &ps_codec->s_parse.s_sao_ctxt); } #endif ps_codec->s_parse.i4_end_of_frame = 1; } return ret; } Commit Message: Set error skip ctbs as multiple 8x8 pus Bug: 65123471 This is required for incomplete ctbs at the frame boundaries Change-Id: I7e41a3ac2f6e35a929ba4ff3ca4cfcc859a7b867 CWE ID: CWE-200
IHEVCD_ERROR_T ihevcd_parse_slice_data(codec_t *ps_codec) { IHEVCD_ERROR_T ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; WORD32 end_of_slice_flag = 0; sps_t *ps_sps; pps_t *ps_pps; slice_header_t *ps_slice_hdr; WORD32 end_of_pic; tile_t *ps_tile, *ps_tile_prev; WORD32 i; WORD32 ctb_addr; WORD32 tile_idx; WORD32 cabac_init_idc; WORD32 ctb_size; WORD32 num_ctb_in_row; WORD32 num_min4x4_in_ctb; WORD32 slice_qp; WORD32 slice_start_ctb_idx; WORD32 tile_start_ctb_idx; ps_slice_hdr = ps_codec->s_parse.ps_slice_hdr_base; ps_pps = ps_codec->s_parse.ps_pps_base; ps_sps = ps_codec->s_parse.ps_sps_base; /* Get current slice header, pps and sps */ ps_slice_hdr += (ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1)); ps_pps += ps_slice_hdr->i1_pps_id; ps_sps += ps_pps->i1_sps_id; if(0 != ps_codec->s_parse.i4_cur_slice_idx) { if(!ps_slice_hdr->i1_dependent_slice_flag) { ps_codec->s_parse.i4_cur_independent_slice_idx++; if(MAX_SLICE_HDR_CNT == ps_codec->s_parse.i4_cur_independent_slice_idx) ps_codec->s_parse.i4_cur_independent_slice_idx = 0; } } ctb_size = 1 << ps_sps->i1_log2_ctb_size; num_min4x4_in_ctb = (ctb_size / 4) * (ctb_size / 4); num_ctb_in_row = ps_sps->i2_pic_wd_in_ctb; /* Update the parse context */ if(0 == ps_codec->i4_slice_error) { ps_codec->s_parse.i4_ctb_x = ps_slice_hdr->i2_ctb_x; ps_codec->s_parse.i4_ctb_y = ps_slice_hdr->i2_ctb_y; } ps_codec->s_parse.ps_pps = ps_pps; ps_codec->s_parse.ps_sps = ps_sps; ps_codec->s_parse.ps_slice_hdr = ps_slice_hdr; /* Derive Tile positions for the current CTB */ /* Change this to lookup if required */ ihevcd_get_tile_pos(ps_pps, ps_sps, ps_codec->s_parse.i4_ctb_x, ps_codec->s_parse.i4_ctb_y, &ps_codec->s_parse.i4_ctb_tile_x, &ps_codec->s_parse.i4_ctb_tile_y, &tile_idx); ps_codec->s_parse.ps_tile = ps_pps->ps_tile + tile_idx; ps_codec->s_parse.i4_cur_tile_idx = tile_idx; ps_tile = ps_codec->s_parse.ps_tile; if(tile_idx) ps_tile_prev = ps_tile - 1; else ps_tile_prev = ps_tile; /* If the present slice is dependent, then store the previous * independent slices' ctb x and y values for decoding process */ if(0 == ps_codec->i4_slice_error) { if(1 == ps_slice_hdr->i1_dependent_slice_flag) { /*If slice is present at the start of a new tile*/ if((0 == ps_codec->s_parse.i4_ctb_tile_x) && (0 == ps_codec->s_parse.i4_ctb_tile_y)) { ps_codec->s_parse.i4_ctb_slice_x = 0; ps_codec->s_parse.i4_ctb_slice_y = 0; } } if(!ps_slice_hdr->i1_dependent_slice_flag) { ps_codec->s_parse.i4_ctb_slice_x = 0; ps_codec->s_parse.i4_ctb_slice_y = 0; } } /* Frame level initializations */ if((0 == ps_codec->s_parse.i4_ctb_y) && (0 == ps_codec->s_parse.i4_ctb_x)) { ret = ihevcd_parse_pic_init(ps_codec); RETURN_IF((ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS), ret); ps_codec->s_parse.pu4_pic_tu_idx[0] = 0; ps_codec->s_parse.pu4_pic_pu_idx[0] = 0; ps_codec->s_parse.i4_cur_independent_slice_idx = 0; ps_codec->s_parse.i4_ctb_tile_x = 0; ps_codec->s_parse.i4_ctb_tile_y = 0; } { /* Updating the poc list of current slice to ps_mv_buf */ mv_buf_t *ps_mv_buf = ps_codec->s_parse.ps_cur_mv_buf; if(ps_slice_hdr->i1_num_ref_idx_l1_active != 0) { for(i = 0; i < ps_slice_hdr->i1_num_ref_idx_l1_active; i++) { ps_mv_buf->l1_collocated_poc[(ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1))][i] = ((pic_buf_t *)ps_slice_hdr->as_ref_pic_list1[i].pv_pic_buf)->i4_abs_poc; ps_mv_buf->u1_l1_collocated_poc_lt[(ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1))][i] = ((pic_buf_t *)ps_slice_hdr->as_ref_pic_list1[i].pv_pic_buf)->u1_used_as_ref; } } if(ps_slice_hdr->i1_num_ref_idx_l0_active != 0) { 
for(i = 0; i < ps_slice_hdr->i1_num_ref_idx_l0_active; i++) { ps_mv_buf->l0_collocated_poc[(ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1))][i] = ((pic_buf_t *)ps_slice_hdr->as_ref_pic_list0[i].pv_pic_buf)->i4_abs_poc; ps_mv_buf->u1_l0_collocated_poc_lt[(ps_codec->s_parse.i4_cur_slice_idx & (MAX_SLICE_HDR_CNT - 1))][i] = ((pic_buf_t *)ps_slice_hdr->as_ref_pic_list0[i].pv_pic_buf)->u1_used_as_ref; } } } /*Initialize the low delay flag at the beginning of every slice*/ if((0 == ps_codec->s_parse.i4_ctb_slice_x) || (0 == ps_codec->s_parse.i4_ctb_slice_y)) { /* Lowdelay flag */ WORD32 cur_poc, ref_list_poc, flag = 1; cur_poc = ps_slice_hdr->i4_abs_pic_order_cnt; for(i = 0; i < ps_slice_hdr->i1_num_ref_idx_l0_active; i++) { ref_list_poc = ((mv_buf_t *)ps_slice_hdr->as_ref_pic_list0[i].pv_mv_buf)->i4_abs_poc; if(ref_list_poc > cur_poc) { flag = 0; break; } } if(flag && (ps_slice_hdr->i1_slice_type == BSLICE)) { for(i = 0; i < ps_slice_hdr->i1_num_ref_idx_l1_active; i++) { ref_list_poc = ((mv_buf_t *)ps_slice_hdr->as_ref_pic_list1[i].pv_mv_buf)->i4_abs_poc; if(ref_list_poc > cur_poc) { flag = 0; break; } } } ps_slice_hdr->i1_low_delay_flag = flag; } /* initialize the cabac init idc based on slice type */ if(ps_slice_hdr->i1_slice_type == ISLICE) { cabac_init_idc = 0; } else if(ps_slice_hdr->i1_slice_type == PSLICE) { cabac_init_idc = ps_slice_hdr->i1_cabac_init_flag ? 2 : 1; } else { cabac_init_idc = ps_slice_hdr->i1_cabac_init_flag ? 1 : 2; } slice_qp = ps_slice_hdr->i1_slice_qp_delta + ps_pps->i1_pic_init_qp; slice_qp = CLIP3(slice_qp, 0, 51); /*Update QP value for every indepndent slice or for every dependent slice that begins at the start of a new tile*/ if((0 == ps_slice_hdr->i1_dependent_slice_flag) || ((1 == ps_slice_hdr->i1_dependent_slice_flag) && ((0 == ps_codec->s_parse.i4_ctb_tile_x) && (0 == ps_codec->s_parse.i4_ctb_tile_y)))) { ps_codec->s_parse.u4_qp = slice_qp; } /*Cabac init at the beginning of a slice*/ if((1 == ps_slice_hdr->i1_dependent_slice_flag) && (!((ps_codec->s_parse.i4_ctb_tile_x == 0) && (ps_codec->s_parse.i4_ctb_tile_y == 0)))) { if((0 == ps_pps->i1_entropy_coding_sync_enabled_flag) || (ps_pps->i1_entropy_coding_sync_enabled_flag && (0 != ps_codec->s_parse.i4_ctb_x))) { ihevcd_cabac_reset(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm); } } else if((0 == ps_pps->i1_entropy_coding_sync_enabled_flag) || (ps_pps->i1_entropy_coding_sync_enabled_flag && (0 != ps_codec->s_parse.i4_ctb_x))) { ret = ihevcd_cabac_init(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm, slice_qp, cabac_init_idc, &gau1_ihevc_cab_ctxts[cabac_init_idc][slice_qp][0]); if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) { ps_codec->i4_slice_error = 1; end_of_slice_flag = 1; ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; } } do { { WORD32 cur_ctb_idx = ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * (ps_sps->i2_pic_wd_in_ctb); if(1 == ps_codec->i4_num_cores && 0 == cur_ctb_idx % RESET_TU_BUF_NCTB) { ps_codec->s_parse.ps_tu = ps_codec->s_parse.ps_pic_tu; ps_codec->s_parse.i4_pic_tu_idx = 0; } } end_of_pic = 0; /* Section:7.3.7 Coding tree unit syntax */ /* coding_tree_unit() inlined here */ /* If number of cores is greater than 1, then add job to the queue */ /* At the start of ctb row parsing in a tile, queue a job for processing the current tile row */ ps_codec->s_parse.i4_ctb_num_pcm_blks = 0; /*At the beginning of each tile-which is not the beginning of a slice, cabac context must be initialized. 
* Hence, check for the tile beginning here */ if(((0 == ps_codec->s_parse.i4_ctb_tile_x) && (0 == ps_codec->s_parse.i4_ctb_tile_y)) && (!((ps_tile->u1_pos_x == 0) && (ps_tile->u1_pos_y == 0))) && (!((0 == ps_codec->s_parse.i4_ctb_slice_x) && (0 == ps_codec->s_parse.i4_ctb_slice_y)))) { slice_qp = ps_slice_hdr->i1_slice_qp_delta + ps_pps->i1_pic_init_qp; slice_qp = CLIP3(slice_qp, 0, 51); ps_codec->s_parse.u4_qp = slice_qp; ihevcd_get_tile_pos(ps_pps, ps_sps, ps_codec->s_parse.i4_ctb_x, ps_codec->s_parse.i4_ctb_y, &ps_codec->s_parse.i4_ctb_tile_x, &ps_codec->s_parse.i4_ctb_tile_y, &tile_idx); ps_codec->s_parse.ps_tile = ps_pps->ps_tile + tile_idx; ps_codec->s_parse.i4_cur_tile_idx = tile_idx; ps_tile_prev = ps_tile - 1; tile_start_ctb_idx = ps_tile->u1_pos_x + ps_tile->u1_pos_y * (ps_sps->i2_pic_wd_in_ctb); slice_start_ctb_idx = ps_slice_hdr->i2_ctb_x + ps_slice_hdr->i2_ctb_y * (ps_sps->i2_pic_wd_in_ctb); /*For slices that span across multiple tiles*/ if(slice_start_ctb_idx < tile_start_ctb_idx) { /* 2 Cases * 1 - slice spans across frame-width- but does not start from 1st column * 2 - Slice spans across multiple tiles anywhere is a frame */ ps_codec->s_parse.i4_ctb_slice_y = ps_tile->u1_pos_y - ps_slice_hdr->i2_ctb_y; if(!(((ps_slice_hdr->i2_ctb_x + ps_tile_prev->u2_wd) % ps_sps->i2_pic_wd_in_ctb) == ps_tile->u1_pos_x)) //Case 2 { if(ps_slice_hdr->i2_ctb_y <= ps_tile->u1_pos_y) { if(ps_slice_hdr->i2_ctb_x > ps_tile->u1_pos_x) { ps_codec->s_parse.i4_ctb_slice_y -= 1; } } } /*ps_codec->s_parse.i4_ctb_slice_y = ps_tile->u1_pos_y - ps_slice_hdr->i2_ctb_y; if (ps_slice_hdr->i2_ctb_y <= ps_tile->u1_pos_y) { if (ps_slice_hdr->i2_ctb_x > ps_tile->u1_pos_x ) { ps_codec->s_parse.i4_ctb_slice_y -= 1 ; } }*/ } if(!ps_slice_hdr->i1_dependent_slice_flag) { ret = ihevcd_cabac_init(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm, slice_qp, cabac_init_idc, &gau1_ihevc_cab_ctxts[cabac_init_idc][slice_qp][0]); if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) { ps_codec->i4_slice_error = 1; end_of_slice_flag = 1; ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; } } } /* If number of cores is greater than 1, then add job to the queue */ /* At the start of ctb row parsing in a tile, queue a job for processing the current tile row */ if(0 == ps_codec->s_parse.i4_ctb_tile_x) { if(1 < ps_codec->i4_num_cores) { proc_job_t s_job; IHEVCD_ERROR_T ret; s_job.i4_cmd = CMD_PROCESS; s_job.i2_ctb_cnt = (WORD16)ps_tile->u2_wd; s_job.i2_ctb_x = (WORD16)ps_codec->s_parse.i4_ctb_x; s_job.i2_ctb_y = (WORD16)ps_codec->s_parse.i4_ctb_y; s_job.i2_slice_idx = (WORD16)ps_codec->s_parse.i4_cur_slice_idx; s_job.i4_tu_coeff_data_ofst = (UWORD8 *)ps_codec->s_parse.pv_tu_coeff_data - (UWORD8 *)ps_codec->s_parse.pv_pic_tu_coeff_data; ret = ihevcd_jobq_queue((jobq_t *)ps_codec->s_parse.pv_proc_jobq, &s_job, sizeof(proc_job_t), 1); if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) return ret; } else { process_ctxt_t *ps_proc = &ps_codec->as_process[0]; WORD32 tu_coeff_data_ofst = (UWORD8 *)ps_codec->s_parse.pv_tu_coeff_data - (UWORD8 *)ps_codec->s_parse.pv_pic_tu_coeff_data; /* If the codec is running in single core mode, * initialize zeroth process context * TODO: Dual core mode might need a different implementation instead of jobq */ ps_proc->i4_ctb_cnt = ps_tile->u2_wd; ps_proc->i4_ctb_x = ps_codec->s_parse.i4_ctb_x; ps_proc->i4_ctb_y = ps_codec->s_parse.i4_ctb_y; ps_proc->i4_cur_slice_idx = ps_codec->s_parse.i4_cur_slice_idx; ihevcd_init_proc_ctxt(ps_proc, tu_coeff_data_ofst); } } /* Restore cabac context model from top right CTB if entropy sync is 
enabled */ if(ps_pps->i1_entropy_coding_sync_enabled_flag) { /*TODO Handle single CTB and top-right belonging to a different slice */ if(0 == ps_codec->s_parse.i4_ctb_x) { WORD32 default_ctxt = 0; if((0 == ps_codec->s_parse.i4_ctb_slice_y) && (!ps_slice_hdr->i1_dependent_slice_flag)) default_ctxt = 1; if(1 == ps_sps->i2_pic_wd_in_ctb) default_ctxt = 1; ps_codec->s_parse.u4_qp = slice_qp; if(default_ctxt) { ret = ihevcd_cabac_init(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm, slice_qp, cabac_init_idc, &gau1_ihevc_cab_ctxts[cabac_init_idc][slice_qp][0]); if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) { ps_codec->i4_slice_error = 1; end_of_slice_flag = 1; ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; } } else { ret = ihevcd_cabac_init(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm, slice_qp, cabac_init_idc, (const UWORD8 *)&ps_codec->s_parse.s_cabac.au1_ctxt_models_sync); if(ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) { ps_codec->i4_slice_error = 1; end_of_slice_flag = 1; ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; } } } } if(0 == ps_codec->i4_slice_error) { if(ps_slice_hdr->i1_slice_sao_luma_flag || ps_slice_hdr->i1_slice_sao_chroma_flag) ihevcd_parse_sao(ps_codec); } else { sao_t *ps_sao = ps_codec->s_parse.ps_pic_sao + ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * ps_sps->i2_pic_wd_in_ctb; /* Default values */ ps_sao->b3_y_type_idx = 0; ps_sao->b3_cb_type_idx = 0; ps_sao->b3_cr_type_idx = 0; } { WORD32 ctb_indx; ctb_indx = ps_codec->s_parse.i4_ctb_x + ps_sps->i2_pic_wd_in_ctb * ps_codec->s_parse.i4_ctb_y; ps_codec->s_parse.s_bs_ctxt.pu1_pic_qp_const_in_ctb[ctb_indx >> 3] |= (1 << (ctb_indx & 7)); { UWORD16 *pu1_slice_idx = ps_codec->s_parse.pu1_slice_idx; pu1_slice_idx[ctb_indx] = ps_codec->s_parse.i4_cur_independent_slice_idx; } } if(0 == ps_codec->i4_slice_error) { tu_t *ps_tu = ps_codec->s_parse.ps_tu; WORD32 i4_tu_cnt = ps_codec->s_parse.s_cu.i4_tu_cnt; WORD32 i4_pic_tu_idx = ps_codec->s_parse.i4_pic_tu_idx; pu_t *ps_pu = ps_codec->s_parse.ps_pu; WORD32 i4_pic_pu_idx = ps_codec->s_parse.i4_pic_pu_idx; UWORD8 *pu1_tu_coeff_data = (UWORD8 *)ps_codec->s_parse.pv_tu_coeff_data; ret = ihevcd_parse_coding_quadtree(ps_codec, (ps_codec->s_parse.i4_ctb_x << ps_sps->i1_log2_ctb_size), (ps_codec->s_parse.i4_ctb_y << ps_sps->i1_log2_ctb_size), ps_sps->i1_log2_ctb_size, 0); /* Check for error */ if (ret != (IHEVCD_ERROR_T)IHEVCD_SUCCESS) { /* Reset tu and pu parameters, and signal current ctb as skip */ WORD32 tu_coeff_data_reset_size; ps_codec->s_parse.ps_tu = ps_tu; ps_codec->s_parse.s_cu.i4_tu_cnt = i4_tu_cnt; ps_codec->s_parse.i4_pic_tu_idx = i4_pic_tu_idx; ps_codec->s_parse.ps_pu = ps_pu; ps_codec->s_parse.i4_pic_pu_idx = i4_pic_pu_idx; tu_coeff_data_reset_size = (UWORD8 *)ps_codec->s_parse.pv_tu_coeff_data - pu1_tu_coeff_data; memset(pu1_tu_coeff_data, 0, tu_coeff_data_reset_size); ps_codec->s_parse.pv_tu_coeff_data = (void *)pu1_tu_coeff_data; ihevcd_set_ctb_skip(ps_codec); /* Set slice error to suppress further parsing and * signal end of slice. 
*/ ps_codec->i4_slice_error = 1; end_of_slice_flag = 1; ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; } } else { ihevcd_set_ctb_skip(ps_codec); } if(0 == ps_codec->i4_slice_error) end_of_slice_flag = ihevcd_cabac_decode_terminate(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm); AEV_TRACE("end_of_slice_flag", end_of_slice_flag, ps_codec->s_parse.s_cabac.u4_range); /* In case of tiles or entropy sync, terminate cabac and copy cabac context backed up at the end of top-right CTB */ if(ps_pps->i1_tiles_enabled_flag || ps_pps->i1_entropy_coding_sync_enabled_flag) { WORD32 end_of_tile = 0; WORD32 end_of_tile_row = 0; /* Take a back up of cabac context models if entropy sync is enabled */ if(ps_pps->i1_entropy_coding_sync_enabled_flag || ps_pps->i1_tiles_enabled_flag) { if(1 == ps_codec->s_parse.i4_ctb_x) { WORD32 size = sizeof(ps_codec->s_parse.s_cabac.au1_ctxt_models); memcpy(&ps_codec->s_parse.s_cabac.au1_ctxt_models_sync, &ps_codec->s_parse.s_cabac.au1_ctxt_models, size); } } /* Since tiles and entropy sync are not enabled simultaneously, the following will not result in any problems */ if((ps_codec->s_parse.i4_ctb_tile_x + 1) == (ps_tile->u2_wd)) { end_of_tile_row = 1; if((ps_codec->s_parse.i4_ctb_tile_y + 1) == ps_tile->u2_ht) end_of_tile = 1; } if((0 == end_of_slice_flag) && ((ps_pps->i1_tiles_enabled_flag && end_of_tile) || (ps_pps->i1_entropy_coding_sync_enabled_flag && end_of_tile_row))) { WORD32 end_of_sub_stream_one_bit; end_of_sub_stream_one_bit = ihevcd_cabac_decode_terminate(&ps_codec->s_parse.s_cabac, &ps_codec->s_parse.s_bitstrm); AEV_TRACE("end_of_sub_stream_one_bit", end_of_sub_stream_one_bit, ps_codec->s_parse.s_cabac.u4_range); /* TODO: Remove the check for offset when HM is updated to include a byte unconditionally even for aligned location */ /* For Ittiam streams this check should not be there, for HM9.1 streams this should be there */ if(ps_codec->s_parse.s_bitstrm.u4_bit_ofst % 8) ihevcd_bits_flush_to_byte_boundary(&ps_codec->s_parse.s_bitstrm); UNUSED(end_of_sub_stream_one_bit); } } { WORD32 ctb_indx; ctb_addr = ps_codec->s_parse.i4_ctb_y * num_ctb_in_row + ps_codec->s_parse.i4_ctb_x; ctb_indx = ++ctb_addr; /* Store pu_idx for next CTB in frame level pu_idx array */ if((ps_tile->u2_wd == (ps_codec->s_parse.i4_ctb_tile_x + 1)) && (ps_tile->u2_wd != ps_sps->i2_pic_wd_in_ctb)) { ctb_indx = (ps_sps->i2_pic_wd_in_ctb * (ps_codec->s_parse.i4_ctb_tile_y + 1 + ps_tile->u1_pos_y)) + ps_tile->u1_pos_x; //idx is the beginning of next row in current tile. if(ps_tile->u2_ht == (ps_codec->s_parse.i4_ctb_tile_y + 1)) { if((ps_tile->u2_wd + ps_tile->u1_pos_x == ps_sps->i2_pic_wd_in_ctb) && ((ps_tile->u2_ht + ps_tile->u1_pos_y == ps_sps->i2_pic_ht_in_ctb))) { ctb_indx = ctb_addr; //Next continuous ctb address } else //Not last tile's end , but a tile end { tile_t *ps_next_tile = ps_codec->s_parse.ps_tile + 1; ctb_indx = ps_next_tile->u1_pos_x + (ps_next_tile->u1_pos_y * ps_sps->i2_pic_wd_in_ctb); //idx is the beginning of first row in next tile. } } } ps_codec->s_parse.pu4_pic_pu_idx[ctb_indx] = ps_codec->s_parse.i4_pic_pu_idx; ps_codec->s_parse.i4_next_pu_ctb_cnt = ctb_indx; ps_codec->s_parse.pu1_pu_map += num_min4x4_in_ctb; /* Store tu_idx for next CTB in frame level tu_idx array */ if(1 == ps_codec->i4_num_cores) { ctb_indx = (0 == ctb_addr % RESET_TU_BUF_NCTB) ? 
RESET_TU_BUF_NCTB : ctb_addr % RESET_TU_BUF_NCTB; if((ps_tile->u2_wd == (ps_codec->s_parse.i4_ctb_tile_x + 1)) && (ps_tile->u2_wd != ps_sps->i2_pic_wd_in_ctb)) { ctb_indx = (ps_sps->i2_pic_wd_in_ctb * (ps_codec->s_parse.i4_ctb_tile_y + 1 + ps_tile->u1_pos_y)) + ps_tile->u1_pos_x; //idx is the beginning of next row in current tile. if(ps_tile->u2_ht == (ps_codec->s_parse.i4_ctb_tile_y + 1)) { if((ps_tile->u2_wd + ps_tile->u1_pos_x == ps_sps->i2_pic_wd_in_ctb) && ((ps_tile->u2_ht + ps_tile->u1_pos_y == ps_sps->i2_pic_ht_in_ctb))) { ctb_indx = (0 == ctb_addr % RESET_TU_BUF_NCTB) ? RESET_TU_BUF_NCTB : ctb_addr % RESET_TU_BUF_NCTB; } else //Not last tile's end , but a tile end { tile_t *ps_next_tile = ps_codec->s_parse.ps_tile + 1; ctb_indx = ps_next_tile->u1_pos_x + (ps_next_tile->u1_pos_y * ps_sps->i2_pic_wd_in_ctb); //idx is the beginning of first row in next tile. } } } ps_codec->s_parse.i4_next_tu_ctb_cnt = ctb_indx; ps_codec->s_parse.pu4_pic_tu_idx[ctb_indx] = ps_codec->s_parse.i4_pic_tu_idx; } else { ctb_indx = ctb_addr; if((ps_tile->u2_wd == (ps_codec->s_parse.i4_ctb_tile_x + 1)) && (ps_tile->u2_wd != ps_sps->i2_pic_wd_in_ctb)) { ctb_indx = (ps_sps->i2_pic_wd_in_ctb * (ps_codec->s_parse.i4_ctb_tile_y + 1 + ps_tile->u1_pos_y)) + ps_tile->u1_pos_x; //idx is the beginning of next row in current tile. if(ps_tile->u2_ht == (ps_codec->s_parse.i4_ctb_tile_y + 1)) { if((ps_tile->u2_wd + ps_tile->u1_pos_x == ps_sps->i2_pic_wd_in_ctb) && ((ps_tile->u2_ht + ps_tile->u1_pos_y == ps_sps->i2_pic_ht_in_ctb))) { ctb_indx = ctb_addr; } else //Not last tile's end , but a tile end { tile_t *ps_next_tile = ps_codec->s_parse.ps_tile + 1; ctb_indx = ps_next_tile->u1_pos_x + (ps_next_tile->u1_pos_y * ps_sps->i2_pic_wd_in_ctb); //idx is the beginning of first row in next tile. 
} } } ps_codec->s_parse.i4_next_tu_ctb_cnt = ctb_indx; ps_codec->s_parse.pu4_pic_tu_idx[ctb_indx] = ps_codec->s_parse.i4_pic_tu_idx; } ps_codec->s_parse.pu1_tu_map += num_min4x4_in_ctb; } if(ps_codec->i4_num_cores <= MV_PRED_NUM_CORES_THRESHOLD) { /*************************************************/ /**************** MV pred **********************/ /*************************************************/ WORD8 u1_top_ctb_avail = 1; WORD8 u1_left_ctb_avail = 1; WORD8 u1_top_lt_ctb_avail = 1; WORD8 u1_top_rt_ctb_avail = 1; WORD16 i2_wd_in_ctb; tile_start_ctb_idx = ps_tile->u1_pos_x + ps_tile->u1_pos_y * (ps_sps->i2_pic_wd_in_ctb); slice_start_ctb_idx = ps_slice_hdr->i2_ctb_x + ps_slice_hdr->i2_ctb_y * (ps_sps->i2_pic_wd_in_ctb); if((slice_start_ctb_idx < tile_start_ctb_idx)) { i2_wd_in_ctb = ps_sps->i2_pic_wd_in_ctb; } else { i2_wd_in_ctb = ps_tile->u2_wd; } /* slice and tile boundaries */ if((0 == ps_codec->s_parse.i4_ctb_y) || (0 == ps_codec->s_parse.i4_ctb_tile_y)) { u1_top_ctb_avail = 0; u1_top_lt_ctb_avail = 0; u1_top_rt_ctb_avail = 0; } if((0 == ps_codec->s_parse.i4_ctb_x) || (0 == ps_codec->s_parse.i4_ctb_tile_x)) { u1_left_ctb_avail = 0; u1_top_lt_ctb_avail = 0; if((0 == ps_codec->s_parse.i4_ctb_slice_y) || (0 == ps_codec->s_parse.i4_ctb_tile_y)) { u1_top_ctb_avail = 0; if((i2_wd_in_ctb - 1) != ps_codec->s_parse.i4_ctb_slice_x) //TODO: For tile, not implemented { u1_top_rt_ctb_avail = 0; } } } /*For slices not beginning at start of a ctb row*/ else if(ps_codec->s_parse.i4_ctb_x > 0) { if((0 == ps_codec->s_parse.i4_ctb_slice_y) || (0 == ps_codec->s_parse.i4_ctb_tile_y)) { u1_top_ctb_avail = 0; u1_top_lt_ctb_avail = 0; if(0 == ps_codec->s_parse.i4_ctb_slice_x) { u1_left_ctb_avail = 0; } if((i2_wd_in_ctb - 1) != ps_codec->s_parse.i4_ctb_slice_x) { u1_top_rt_ctb_avail = 0; } } else if((1 == ps_codec->s_parse.i4_ctb_slice_y) && (0 == ps_codec->s_parse.i4_ctb_slice_x)) { u1_top_lt_ctb_avail = 0; } } if(((ps_sps->i2_pic_wd_in_ctb - 1) == ps_codec->s_parse.i4_ctb_x) || ((ps_tile->u2_wd - 1) == ps_codec->s_parse.i4_ctb_tile_x)) { u1_top_rt_ctb_avail = 0; } if(PSLICE == ps_slice_hdr->i1_slice_type || BSLICE == ps_slice_hdr->i1_slice_type) { mv_ctxt_t s_mv_ctxt; process_ctxt_t *ps_proc; UWORD32 *pu4_ctb_top_pu_idx; UWORD32 *pu4_ctb_left_pu_idx; UWORD32 *pu4_ctb_top_left_pu_idx; WORD32 i4_ctb_pu_cnt; WORD32 cur_ctb_idx; WORD32 next_ctb_idx; WORD32 cur_pu_idx; ps_proc = &ps_codec->as_process[(ps_codec->i4_num_cores == 1) ? 
1 : (ps_codec->i4_num_cores - 1)]; cur_ctb_idx = ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * (ps_sps->i2_pic_wd_in_ctb); next_ctb_idx = ps_codec->s_parse.i4_next_pu_ctb_cnt; i4_ctb_pu_cnt = ps_codec->s_parse.pu4_pic_pu_idx[next_ctb_idx] - ps_codec->s_parse.pu4_pic_pu_idx[cur_ctb_idx]; cur_pu_idx = ps_codec->s_parse.pu4_pic_pu_idx[cur_ctb_idx]; pu4_ctb_top_pu_idx = ps_proc->pu4_pic_pu_idx_top + (ps_codec->s_parse.i4_ctb_x * ctb_size / MIN_PU_SIZE); pu4_ctb_left_pu_idx = ps_proc->pu4_pic_pu_idx_left; pu4_ctb_top_left_pu_idx = &ps_proc->u4_ctb_top_left_pu_idx; /* Initializing s_mv_ctxt */ { s_mv_ctxt.ps_pps = ps_pps; s_mv_ctxt.ps_sps = ps_sps; s_mv_ctxt.ps_slice_hdr = ps_slice_hdr; s_mv_ctxt.i4_ctb_x = ps_codec->s_parse.i4_ctb_x; s_mv_ctxt.i4_ctb_y = ps_codec->s_parse.i4_ctb_y; s_mv_ctxt.ps_pu = &ps_codec->s_parse.ps_pic_pu[cur_pu_idx]; s_mv_ctxt.ps_pic_pu = ps_codec->s_parse.ps_pic_pu; s_mv_ctxt.ps_tile = ps_tile; s_mv_ctxt.pu4_pic_pu_idx_map = ps_proc->pu4_pic_pu_idx_map; s_mv_ctxt.pu4_pic_pu_idx = ps_codec->s_parse.pu4_pic_pu_idx; s_mv_ctxt.pu1_pic_pu_map = ps_codec->s_parse.pu1_pic_pu_map; s_mv_ctxt.i4_ctb_pu_cnt = i4_ctb_pu_cnt; s_mv_ctxt.i4_ctb_start_pu_idx = cur_pu_idx; s_mv_ctxt.u1_top_ctb_avail = u1_top_ctb_avail; s_mv_ctxt.u1_top_rt_ctb_avail = u1_top_rt_ctb_avail; s_mv_ctxt.u1_top_lt_ctb_avail = u1_top_lt_ctb_avail; s_mv_ctxt.u1_left_ctb_avail = u1_left_ctb_avail; } ihevcd_get_mv_ctb(&s_mv_ctxt, pu4_ctb_top_pu_idx, pu4_ctb_left_pu_idx, pu4_ctb_top_left_pu_idx); } else { WORD32 num_minpu_in_ctb = (ctb_size / MIN_PU_SIZE) * (ctb_size / MIN_PU_SIZE); UWORD8 *pu1_pic_pu_map_ctb = ps_codec->s_parse.pu1_pic_pu_map + (ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * ps_sps->i2_pic_wd_in_ctb) * num_minpu_in_ctb; process_ctxt_t *ps_proc = &ps_codec->as_process[(ps_codec->i4_num_cores == 1) ? 1 : (ps_codec->i4_num_cores - 1)]; WORD32 row, col; WORD32 pu_cnt; WORD32 num_pu_per_ctb; WORD32 cur_ctb_idx; WORD32 next_ctb_idx; WORD32 ctb_start_pu_idx; UWORD32 *pu4_nbr_pu_idx = ps_proc->pu4_pic_pu_idx_map; WORD32 nbr_pu_idx_strd = MAX_CTB_SIZE / MIN_PU_SIZE + 2; pu_t *ps_pu; for(row = 0; row < ctb_size / MIN_PU_SIZE; row++) { for(col = 0; col < ctb_size / MIN_PU_SIZE; col++) { pu1_pic_pu_map_ctb[row * ctb_size / MIN_PU_SIZE + col] = 0; } } /* Neighbor PU idx update inside CTB */ /* 1byte per 4x4. 
Indicates the PU idx that 4x4 block belongs to */ cur_ctb_idx = ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * (ps_sps->i2_pic_wd_in_ctb); next_ctb_idx = ps_codec->s_parse.i4_next_pu_ctb_cnt; num_pu_per_ctb = ps_codec->s_parse.pu4_pic_pu_idx[next_ctb_idx] - ps_codec->s_parse.pu4_pic_pu_idx[cur_ctb_idx]; ctb_start_pu_idx = ps_codec->s_parse.pu4_pic_pu_idx[cur_ctb_idx]; ps_pu = &ps_codec->s_parse.ps_pic_pu[ctb_start_pu_idx]; for(pu_cnt = 0; pu_cnt < num_pu_per_ctb; pu_cnt++, ps_pu++) { UWORD32 cur_pu_idx; WORD32 pu_ht = (ps_pu->b4_ht + 1) << 2; WORD32 pu_wd = (ps_pu->b4_wd + 1) << 2; cur_pu_idx = ctb_start_pu_idx + pu_cnt; for(row = 0; row < pu_ht / MIN_PU_SIZE; row++) for(col = 0; col < pu_wd / MIN_PU_SIZE; col++) pu4_nbr_pu_idx[(1 + ps_pu->b4_pos_x + col) + (1 + ps_pu->b4_pos_y + row) * nbr_pu_idx_strd] = cur_pu_idx; } /* Updating Top and Left pointers */ { WORD32 rows_remaining = ps_sps->i2_pic_height_in_luma_samples - (ps_codec->s_parse.i4_ctb_y << ps_sps->i1_log2_ctb_size); WORD32 ctb_size_left = MIN(ctb_size, rows_remaining); /* Top Left */ /* saving top left before updating top ptr, as updating top ptr will overwrite the top left for the next ctb */ ps_proc->u4_ctb_top_left_pu_idx = ps_proc->pu4_pic_pu_idx_top[(ps_codec->s_parse.i4_ctb_x * ctb_size / MIN_PU_SIZE) + ctb_size / MIN_PU_SIZE - 1]; for(i = 0; i < ctb_size / MIN_PU_SIZE; i++) { /* Left */ /* Last column of au4_nbr_pu_idx */ ps_proc->pu4_pic_pu_idx_left[i] = pu4_nbr_pu_idx[(ctb_size / MIN_PU_SIZE) + (i + 1) * nbr_pu_idx_strd]; /* Top */ /* Last row of au4_nbr_pu_idx */ ps_proc->pu4_pic_pu_idx_top[(ps_codec->s_parse.i4_ctb_x * ctb_size / MIN_PU_SIZE) + i] = pu4_nbr_pu_idx[(ctb_size_left / MIN_PU_SIZE) * nbr_pu_idx_strd + i + 1]; } } } /*************************************************/ /****************** BS, QP *********************/ /*************************************************/ /* Check if deblock is disabled for the current slice or if it is disabled for the current picture * because of disable deblock api */ if(0 == ps_codec->i4_disable_deblk_pic) { if((0 == ps_slice_hdr->i1_slice_disable_deblocking_filter_flag) && (0 == ps_codec->i4_slice_error)) { WORD32 i4_ctb_tu_cnt; WORD32 cur_ctb_idx, next_ctb_idx; WORD32 cur_pu_idx; WORD32 cur_tu_idx; process_ctxt_t *ps_proc; ps_proc = &ps_codec->as_process[(ps_codec->i4_num_cores == 1) ? 
1 : (ps_codec->i4_num_cores - 1)]; cur_ctb_idx = ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * (ps_sps->i2_pic_wd_in_ctb); cur_pu_idx = ps_codec->s_parse.pu4_pic_pu_idx[cur_ctb_idx]; next_ctb_idx = ps_codec->s_parse.i4_next_tu_ctb_cnt; if(1 == ps_codec->i4_num_cores) { i4_ctb_tu_cnt = ps_codec->s_parse.pu4_pic_tu_idx[next_ctb_idx] - ps_codec->s_parse.pu4_pic_tu_idx[cur_ctb_idx % RESET_TU_BUF_NCTB]; cur_tu_idx = ps_codec->s_parse.pu4_pic_tu_idx[cur_ctb_idx % RESET_TU_BUF_NCTB]; } else { i4_ctb_tu_cnt = ps_codec->s_parse.pu4_pic_tu_idx[next_ctb_idx] - ps_codec->s_parse.pu4_pic_tu_idx[cur_ctb_idx]; cur_tu_idx = ps_codec->s_parse.pu4_pic_tu_idx[cur_ctb_idx]; } ps_codec->s_parse.s_bs_ctxt.ps_pps = ps_codec->s_parse.ps_pps; ps_codec->s_parse.s_bs_ctxt.ps_sps = ps_codec->s_parse.ps_sps; ps_codec->s_parse.s_bs_ctxt.ps_codec = ps_codec; ps_codec->s_parse.s_bs_ctxt.i4_ctb_tu_cnt = i4_ctb_tu_cnt; ps_codec->s_parse.s_bs_ctxt.i4_ctb_x = ps_codec->s_parse.i4_ctb_x; ps_codec->s_parse.s_bs_ctxt.i4_ctb_y = ps_codec->s_parse.i4_ctb_y; ps_codec->s_parse.s_bs_ctxt.i4_ctb_tile_x = ps_codec->s_parse.i4_ctb_tile_x; ps_codec->s_parse.s_bs_ctxt.i4_ctb_tile_y = ps_codec->s_parse.i4_ctb_tile_y; ps_codec->s_parse.s_bs_ctxt.i4_ctb_slice_x = ps_codec->s_parse.i4_ctb_slice_x; ps_codec->s_parse.s_bs_ctxt.i4_ctb_slice_y = ps_codec->s_parse.i4_ctb_slice_y; ps_codec->s_parse.s_bs_ctxt.ps_tu = &ps_codec->s_parse.ps_pic_tu[cur_tu_idx]; ps_codec->s_parse.s_bs_ctxt.ps_pu = &ps_codec->s_parse.ps_pic_pu[cur_pu_idx]; ps_codec->s_parse.s_bs_ctxt.pu4_pic_pu_idx_map = ps_proc->pu4_pic_pu_idx_map; ps_codec->s_parse.s_bs_ctxt.i4_next_pu_ctb_cnt = ps_codec->s_parse.i4_next_pu_ctb_cnt; ps_codec->s_parse.s_bs_ctxt.i4_next_tu_ctb_cnt = ps_codec->s_parse.i4_next_tu_ctb_cnt; ps_codec->s_parse.s_bs_ctxt.pu1_slice_idx = ps_codec->s_parse.pu1_slice_idx; ps_codec->s_parse.s_bs_ctxt.ps_slice_hdr = ps_codec->s_parse.ps_slice_hdr; ps_codec->s_parse.s_bs_ctxt.ps_tile = ps_codec->s_parse.ps_tile; if(ISLICE == ps_slice_hdr->i1_slice_type) { ihevcd_ctb_boundary_strength_islice(&ps_codec->s_parse.s_bs_ctxt); } else { ihevcd_ctb_boundary_strength_pbslice(&ps_codec->s_parse.s_bs_ctxt); } } else { WORD32 bs_strd = (ps_sps->i2_pic_wd_in_ctb + 1) * (ctb_size * ctb_size / 8 / 16); UWORD32 *pu4_vert_bs = (UWORD32 *)((UWORD8 *)ps_codec->s_parse.s_bs_ctxt.pu4_pic_vert_bs + ps_codec->s_parse.i4_ctb_x * (ctb_size * ctb_size / 8 / 16) + ps_codec->s_parse.i4_ctb_y * bs_strd); UWORD32 *pu4_horz_bs = (UWORD32 *)((UWORD8 *)ps_codec->s_parse.s_bs_ctxt.pu4_pic_horz_bs + ps_codec->s_parse.i4_ctb_x * (ctb_size * ctb_size / 8 / 16) + ps_codec->s_parse.i4_ctb_y * bs_strd); memset(pu4_vert_bs, 0, (ctb_size / 8 + 1) * (ctb_size / 4) / 8 * 2); memset(pu4_horz_bs, 0, (ctb_size / 8) * (ctb_size / 4) / 8 * 2); } } } /* Update the parse status map */ { sps_t *ps_sps = ps_codec->s_parse.ps_sps; UWORD8 *pu1_buf; WORD32 idx; idx = (ps_codec->s_parse.i4_ctb_x); idx += ((ps_codec->s_parse.i4_ctb_y) * ps_sps->i2_pic_wd_in_ctb); pu1_buf = (ps_codec->pu1_parse_map + idx); *pu1_buf = 1; } /* Increment CTB x and y positions */ ps_codec->s_parse.i4_ctb_tile_x++; ps_codec->s_parse.i4_ctb_x++; ps_codec->s_parse.i4_ctb_slice_x++; /*If tiles are enabled, handle the slice counters differently*/ if(ps_pps->i1_tiles_enabled_flag) { tile_start_ctb_idx = ps_tile->u1_pos_x + ps_tile->u1_pos_y * (ps_sps->i2_pic_wd_in_ctb); slice_start_ctb_idx = ps_slice_hdr->i2_ctb_x + ps_slice_hdr->i2_ctb_y * (ps_sps->i2_pic_wd_in_ctb); if((slice_start_ctb_idx < tile_start_ctb_idx)) { 
if(ps_codec->s_parse.i4_ctb_slice_x == (ps_tile->u1_pos_x + ps_tile->u2_wd)) { /* Reached end of slice row within a tile /frame */ ps_codec->s_parse.i4_ctb_slice_y++; ps_codec->s_parse.i4_ctb_slice_x = ps_tile->u1_pos_x; //todo:Check } } else if(ps_codec->s_parse.i4_ctb_slice_x == (ps_tile->u2_wd)) { ps_codec->s_parse.i4_ctb_slice_y++; ps_codec->s_parse.i4_ctb_slice_x = 0; } } else { if(ps_codec->s_parse.i4_ctb_slice_x == ps_tile->u2_wd) { /* Reached end of slice row within a tile /frame */ ps_codec->s_parse.i4_ctb_slice_y++; ps_codec->s_parse.i4_ctb_slice_x = 0; } } if(ps_codec->s_parse.i4_ctb_tile_x == (ps_tile->u2_wd)) { /* Reached end of tile row */ ps_codec->s_parse.i4_ctb_tile_x = 0; ps_codec->s_parse.i4_ctb_x = ps_tile->u1_pos_x; ps_codec->s_parse.i4_ctb_tile_y++; ps_codec->s_parse.i4_ctb_y++; if(ps_codec->s_parse.i4_ctb_tile_y == (ps_tile->u2_ht)) { /* Reached End of Tile */ ps_codec->s_parse.i4_ctb_tile_y = 0; ps_codec->s_parse.i4_ctb_tile_x = 0; ps_codec->s_parse.ps_tile++; if((ps_tile->u2_ht + ps_tile->u1_pos_y == ps_sps->i2_pic_ht_in_ctb) && (ps_tile->u2_wd + ps_tile->u1_pos_x == ps_sps->i2_pic_wd_in_ctb)) { /* Reached end of frame */ end_of_pic = 1; ps_codec->s_parse.i4_ctb_x = 0; ps_codec->s_parse.i4_ctb_y = ps_sps->i2_pic_ht_in_ctb; } else { /* Initialize ctb_x and ctb_y to start of next tile */ ps_tile = ps_codec->s_parse.ps_tile; ps_codec->s_parse.i4_ctb_x = ps_tile->u1_pos_x; ps_codec->s_parse.i4_ctb_y = ps_tile->u1_pos_y; ps_codec->s_parse.i4_ctb_tile_y = 0; ps_codec->s_parse.i4_ctb_tile_x = 0; ps_codec->s_parse.i4_ctb_slice_x = ps_tile->u1_pos_x; ps_codec->s_parse.i4_ctb_slice_y = ps_tile->u1_pos_y; } } } ps_codec->s_parse.i4_next_ctb_indx = ps_codec->s_parse.i4_ctb_x + ps_codec->s_parse.i4_ctb_y * ps_sps->i2_pic_wd_in_ctb; /* If the current slice is in error, check if the next slice's address * is reached and mark the end_of_slice flag */ if(ps_codec->i4_slice_error) { slice_header_t *ps_slice_hdr_next = ps_slice_hdr + 1; WORD32 next_slice_addr = ps_slice_hdr_next->i2_ctb_x + ps_slice_hdr_next->i2_ctb_y * ps_sps->i2_pic_wd_in_ctb; if(ps_codec->s_parse.i4_next_ctb_indx == next_slice_addr) end_of_slice_flag = 1; } /* If the codec is running in single core mode * then call process function for current CTB */ if((1 == ps_codec->i4_num_cores) && (ps_codec->s_parse.i4_ctb_tile_x == 0)) { process_ctxt_t *ps_proc = &ps_codec->as_process[0]; ps_proc->i4_ctb_cnt = ps_proc->ps_tile->u2_wd; ihevcd_process(ps_proc); } /* If the bytes for the current slice are exhausted * set end_of_slice flag to 1 * This slice will be treated as incomplete */ if((UWORD8 *)ps_codec->s_parse.s_bitstrm.pu1_buf_max + BITSTRM_OFF_THRS < ((UWORD8 *)ps_codec->s_parse.s_bitstrm.pu4_buf + (ps_codec->s_parse.s_bitstrm.u4_bit_ofst / 8))) { if(0 == ps_codec->i4_slice_error) end_of_slice_flag = 1; } if(end_of_pic) break; } while(!end_of_slice_flag); /* Reset slice error */ ps_codec->i4_slice_error = 0; /* Increment the slice index for parsing next slice */ if(0 == end_of_pic) { while(1) { WORD32 parse_slice_idx; parse_slice_idx = ps_codec->s_parse.i4_cur_slice_idx; parse_slice_idx++; { /* If the next slice header is not initialized, update cur_slice_idx and break */ if((1 == ps_codec->i4_num_cores) || (0 != (parse_slice_idx & (MAX_SLICE_HDR_CNT - 1)))) { ps_codec->s_parse.i4_cur_slice_idx = parse_slice_idx; break; } /* If the next slice header is initialised, wait for the parsed slices to be processed */ else { WORD32 ctb_indx = 0; while(ctb_indx != ps_sps->i4_pic_size_in_ctb) { WORD32 parse_status = 
*(ps_codec->pu1_parse_map + ctb_indx); volatile WORD32 proc_status = *(ps_codec->pu1_proc_map + ctb_indx) & 1; if(parse_status == proc_status) ctb_indx++; } ps_codec->s_parse.i4_cur_slice_idx = parse_slice_idx; break; } } } } else { #if FRAME_ILF_PAD if(FRAME_ILF_PAD && 1 == ps_codec->i4_num_cores) { if(ps_slice_hdr->i4_abs_pic_order_cnt == 0) { DUMP_PRE_ILF(ps_codec->as_process[0].pu1_cur_pic_luma, ps_codec->as_process[0].pu1_cur_pic_chroma, ps_sps->i2_pic_width_in_luma_samples, ps_sps->i2_pic_height_in_luma_samples, ps_codec->i4_strd); DUMP_BS(ps_codec->as_process[0].s_bs_ctxt.pu4_pic_vert_bs, ps_codec->as_process[0].s_bs_ctxt.pu4_pic_horz_bs, ps_sps->i2_pic_wd_in_ctb * (ctb_size * ctb_size / 8 / 16) * ps_sps->i2_pic_ht_in_ctb, (ps_sps->i2_pic_wd_in_ctb + 1) * (ctb_size * ctb_size / 8 / 16) * ps_sps->i2_pic_ht_in_ctb); DUMP_QP(ps_codec->as_process[0].s_bs_ctxt.pu1_pic_qp, (ps_sps->i2_pic_height_in_luma_samples * ps_sps->i2_pic_width_in_luma_samples) / (MIN_CU_SIZE * MIN_CU_SIZE)); DUMP_QP_CONST_IN_CTB(ps_codec->as_process[0].s_bs_ctxt.pu1_pic_qp_const_in_ctb, (ps_sps->i2_pic_height_in_luma_samples * ps_sps->i2_pic_width_in_luma_samples) / (MIN_CTB_SIZE * MIN_CTB_SIZE) / 8); DUMP_NO_LOOP_FILTER(ps_codec->as_process[0].pu1_pic_no_loop_filter_flag, (ps_sps->i2_pic_width_in_luma_samples / MIN_CU_SIZE) * (ps_sps->i2_pic_height_in_luma_samples / MIN_CU_SIZE) / 8); DUMP_OFFSETS(ps_slice_hdr->i1_beta_offset_div2, ps_slice_hdr->i1_tc_offset_div2, ps_pps->i1_pic_cb_qp_offset, ps_pps->i1_pic_cr_qp_offset); } ps_codec->s_parse.s_deblk_ctxt.ps_pps = ps_codec->s_parse.ps_pps; ps_codec->s_parse.s_deblk_ctxt.ps_sps = ps_codec->s_parse.ps_sps; ps_codec->s_parse.s_deblk_ctxt.ps_codec = ps_codec; ps_codec->s_parse.s_deblk_ctxt.ps_slice_hdr = ps_codec->s_parse.ps_slice_hdr; ps_codec->s_parse.s_deblk_ctxt.is_chroma_yuv420sp_vu = (ps_codec->e_ref_chroma_fmt == IV_YUV_420SP_VU); ps_codec->s_parse.s_sao_ctxt.ps_pps = ps_codec->s_parse.ps_pps; ps_codec->s_parse.s_sao_ctxt.ps_sps = ps_codec->s_parse.ps_sps; ps_codec->s_parse.s_sao_ctxt.ps_codec = ps_codec; ps_codec->s_parse.s_sao_ctxt.ps_slice_hdr = ps_codec->s_parse.ps_slice_hdr; ihevcd_ilf_pad_frame(&ps_codec->s_parse.s_deblk_ctxt, &ps_codec->s_parse.s_sao_ctxt); } #endif ps_codec->s_parse.i4_end_of_frame = 1; } return ret; }
174,118
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void BrowserMainParts::PostMainMessageLoopRun() {
  CompositorUtils::GetInstance()->Shutdown();
}
Commit Message:
CWE ID: CWE-20
void BrowserMainParts::PostMainMessageLoopRun() {
  WebContentsUnloader::GetInstance()->Shutdown();
  BrowserContextDestroyer::Shutdown();
  BrowserContext::AssertNoContextsExist();
  CompositorUtils::GetInstance()->Shutdown();
}
165,423
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    BDRVParallelsState *s = bs->opaque;
    int i;
    struct parallels_header ph;
    int ret;

    bs->read_only = 1; // no write support yet

    ret = bdrv_pread(bs->file, 0, &ph, sizeof(ph));
    if (ret < 0) {
        goto fail;
    }

    if (memcmp(ph.magic, HEADER_MAGIC, 16) ||
        (le32_to_cpu(ph.version) != HEADER_VERSION)) {
        error_setg(errp, "Image not in Parallels format");
        ret = -EINVAL;
        goto fail;
    }

    bs->total_sectors = le32_to_cpu(ph.nb_sectors);
    s->tracks = le32_to_cpu(ph.tracks);
    s->catalog_size = le32_to_cpu(ph.catalog_entries);
    s->catalog_bitmap = g_malloc(s->catalog_size * 4);

    ret = bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4);
    le32_to_cpus(&s->catalog_bitmap[i]);

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->catalog_bitmap);
    return ret;
}
Commit Message:
CWE ID: CWE-190
static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    BDRVParallelsState *s = bs->opaque;
    int i;
    struct parallels_header ph;
    int ret;

    bs->read_only = 1; // no write support yet

    ret = bdrv_pread(bs->file, 0, &ph, sizeof(ph));
    if (ret < 0) {
        goto fail;
    }

    if (memcmp(ph.magic, HEADER_MAGIC, 16) ||
        (le32_to_cpu(ph.version) != HEADER_VERSION)) {
        error_setg(errp, "Image not in Parallels format");
        ret = -EINVAL;
        goto fail;
    }

    bs->total_sectors = le32_to_cpu(ph.nb_sectors);
    s->tracks = le32_to_cpu(ph.tracks);
    s->catalog_size = le32_to_cpu(ph.catalog_entries);
    if (s->catalog_size > INT_MAX / 4) {
        error_setg(errp, "Catalog too large");
        ret = -EFBIG;
        goto fail;
    }
    s->catalog_bitmap = g_malloc(s->catalog_size * 4);

    ret = bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4);
    le32_to_cpus(&s->catalog_bitmap[i]);

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->catalog_bitmap);
    return ret;
}
165,410
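The patch in the record above shows the canonical CWE-190 defence: bound an untrusted 32-bit count before it is multiplied into an allocation size. A minimal standalone sketch of that guard, using hypothetical names rather than QEMU's own types and helpers:

    #include <limits.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Allocate room for 'count' 4-byte catalog entries read from an
     * untrusted image header. The bound check mirrors the added
     * "catalog_size > INT_MAX / 4" test: it rejects any count whose
     * byte size would not fit the signed int range used downstream. */
    static uint32_t *alloc_catalog(uint32_t count)
    {
        if (count > INT_MAX / 4) {
            return NULL;
        }
        return malloc((size_t)count * 4);
    }

Doing the comparison on the un-multiplied count keeps the test itself free of overflow.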
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int key_notify_policy_flush(const struct km_event *c)
{
    struct sk_buff *skb_out;
    struct sadb_msg *hdr;

    skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
    if (!skb_out)
        return -ENOBUFS;
    hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
    hdr->sadb_msg_type = SADB_X_SPDFLUSH;
    hdr->sadb_msg_seq = c->seq;
    hdr->sadb_msg_pid = c->portid;
    hdr->sadb_msg_version = PF_KEY_V2;
    hdr->sadb_msg_errno = (uint8_t) 0;
    hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
    pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
    return 0;
}
Commit Message: af_key: initialize satype in key_notify_policy_flush()

This field was left uninitialized. Some user daemons perform check against
this field.

Signed-off-by: Nicolas Dichtel <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
CWE ID: CWE-119
static int key_notify_policy_flush(const struct km_event *c)
{
    struct sk_buff *skb_out;
    struct sadb_msg *hdr;

    skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
    if (!skb_out)
        return -ENOBUFS;
    hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
    hdr->sadb_msg_type = SADB_X_SPDFLUSH;
    hdr->sadb_msg_seq = c->seq;
    hdr->sadb_msg_pid = c->portid;
    hdr->sadb_msg_version = PF_KEY_V2;
    hdr->sadb_msg_errno = (uint8_t) 0;
    hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
    hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
    pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
    return 0;
}
166,073
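The one-line fix in the record above works because the sadb_msg header is built field-by-field in memory that alloc_skb does not zero, and sadb_msg_satype was the one field never assigned. A hedged sketch of the more defensive construction, zeroing the whole fixed-layout header before filling it in (illustrative struct and values, not the kernel's):

    #include <stdint.h>
    #include <string.h>

    struct msg_hdr {
        uint8_t  version;
        uint8_t  type;
        uint8_t  satype;
        uint8_t  len;
        uint32_t seq;
        uint32_t pid;
    };

    /* Zero the header first so any field forgotten later (as satype
     * was in the original code) reads as 0 rather than as whatever
     * the allocator left behind. */
    static void build_hdr(struct msg_hdr *hdr, uint32_t seq, uint32_t pid)
    {
        memset(hdr, 0, sizeof(*hdr));
        hdr->version = 2;  /* stand-in for PF_KEY_V2 */
        hdr->type    = 1;  /* stand-in for a flush command id */
        hdr->seq     = seq;
        hdr->pid     = pid;
        hdr->len     = sizeof(*hdr) / 8;
    }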
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void LauncherView::Init() {
  ResourceBundle& rb = ResourceBundle::GetSharedInstance();

  model_->AddObserver(this);

  const LauncherItems& items(model_->items());
  for (LauncherItems::const_iterator i = items.begin(); i != items.end(); ++i) {
    views::View* child = CreateViewForItem(*i);
    child->SetPaintToLayer(true);
    view_model_->Add(child, static_cast<int>(i - items.begin()));
    AddChildView(child);
  }
  UpdateFirstButtonPadding();

  overflow_button_ = new views::ImageButton(this);
  overflow_button_->set_accessibility_focusable(true);
  overflow_button_->SetImage(
      views::CustomButton::BS_NORMAL,
      rb.GetImageNamed(IDR_AURA_LAUNCHER_OVERFLOW).ToImageSkia());
  overflow_button_->SetImage(
      views::CustomButton::BS_HOT,
      rb.GetImageNamed(IDR_AURA_LAUNCHER_OVERFLOW_HOT).ToImageSkia());
  overflow_button_->SetImage(
      views::CustomButton::BS_PUSHED,
      rb.GetImageNamed(IDR_AURA_LAUNCHER_OVERFLOW_PUSHED).ToImageSkia());
  overflow_button_->SetAccessibleName(
      l10n_util::GetStringUTF16(IDS_AURA_LAUNCHER_OVERFLOW_NAME));
  overflow_button_->set_context_menu_controller(this);
  ConfigureChildView(overflow_button_);
  AddChildView(overflow_button_);
}
Commit Message: ash: Add launcher overflow bubble.
- Host a LauncherView in bubble to display overflown items;
- Mouse wheel and two-finger scroll to scroll the LauncherView in bubble in
  case overflow bubble is overflown;
- Fit bubble when items are added/removed;
- Keep launcher bar on screen when the bubble is shown;
BUG=128054
TEST=Verify launcher overflown items are in a bubble instead of menu.
Review URL: https://chromiumcodereview.appspot.com/10659003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@146460 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119
void LauncherView::Init() {
  ResourceBundle& rb = ResourceBundle::GetSharedInstance();

  model_->AddObserver(this);

  const LauncherItems& items(model_->items());
  for (LauncherItems::const_iterator i = items.begin(); i != items.end(); ++i) {
    views::View* child = CreateViewForItem(*i);
    child->SetPaintToLayer(true);
    view_model_->Add(child, static_cast<int>(i - items.begin()));
    AddChildView(child);
  }
  UpdateFirstButtonPadding();

  overflow_button_ = new views::ImageButton(this);
  overflow_button_->set_accessibility_focusable(true);
  overflow_button_->SetImageAlignment(views::ImageButton::ALIGN_CENTER,
                                      views::ImageButton::ALIGN_MIDDLE);
  overflow_button_->SetImage(
      views::CustomButton::BS_NORMAL,
      rb.GetImageNamed(IDR_AURA_LAUNCHER_OVERFLOW).ToImageSkia());
  overflow_button_->SetImage(
      views::CustomButton::BS_HOT,
      rb.GetImageNamed(IDR_AURA_LAUNCHER_OVERFLOW_HOT).ToImageSkia());
  overflow_button_->SetImage(
      views::CustomButton::BS_PUSHED,
      rb.GetImageNamed(IDR_AURA_LAUNCHER_OVERFLOW_PUSHED).ToImageSkia());
  overflow_button_->SetAccessibleName(
      l10n_util::GetStringUTF16(IDS_AURA_LAUNCHER_OVERFLOW_NAME));
  overflow_button_->set_context_menu_controller(this);
  ConfigureChildView(overflow_button_);
  AddChildView(overflow_button_);
}
170,890
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: long Segment::DoParseNext(const Cluster*& pResult, long long& pos, long& len) { long long total, avail; long status = m_pReader->Length(&total, &avail); if (status < 0) // error return status; assert((total < 0) || (avail <= total)); const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size; long long off_next = 0; long long cluster_size = -1; for (;;) { if ((total >= 0) && (pos >= total)) return 1; // EOF if ((segment_stop >= 0) && (pos >= segment_stop)) return 1; // EOF if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; // absolute const long long idoff = pos - m_start; // relative const long long id = ReadUInt(m_pReader, idpos, len); // absolute if (id < 0) // error return static_cast<long>(id); if (id == 0) // weird return -1; // generic error pos += len; // consume ID if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume length of size of element if (size == 0) // weird continue; const long long unknown_size = (1LL << (7 * len)) - 1; if ((segment_stop >= 0) && (size != unknown_size) && ((pos + size) > segment_stop)) { return E_FILE_FORMAT_INVALID; } if (id == 0x0C53BB6B) { // Cues ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; const long long element_stop = pos + size; if ((segment_stop >= 0) && (element_stop > segment_stop)) return E_FILE_FORMAT_INVALID; const long long element_start = idpos; const long long element_size = element_stop - element_start; if (m_pCues == NULL) { m_pCues = new Cues(this, pos, size, element_start, element_size); assert(m_pCues); // TODO } pos += size; // consume payload assert((segment_stop < 0) || (pos <= segment_stop)); continue; } if (id != 0x0F43B675) { // not a Cluster ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; pos += size; // consume payload assert((segment_stop < 0) || (pos <= segment_stop)); continue; } #if 0 // this is commented-out to support incremental cluster parsing len = static_cast<long>(size); if (element_stop > avail) return E_BUFFER_NOT_FULL; #endif off_next = idoff; if (size != unknown_size) cluster_size = size; break; } assert(off_next > 0); // have cluster Cluster** const ii = m_clusters + m_clusterCount; Cluster** i = ii; Cluster** const jj = ii + m_clusterPreloadCount; Cluster** j = jj; while (i < j) { Cluster** const k = i + (j - i) / 2; assert(k < jj); const Cluster* const pNext = *k; assert(pNext); assert(pNext->m_index < 0); pos = pNext->GetPosition(); assert(pos >= 0); if (pos < off_next) i = k + 1; else if (pos > off_next) j = k; else { pResult = pNext; return 0; // success } } assert(i == j); long long pos_; long len_; status = Cluster::HasBlockEntries(this, off_next, pos_, len_); if (status < 0) { // error or underflow pos = pos_; len = len_; return status; } if (status > 0) { // means "found at least one block entry" 
Cluster* const pNext = Cluster::Create(this, -1, // preloaded off_next); assert(pNext); const ptrdiff_t idx_next = i - m_clusters; // insertion position PreloadCluster(pNext, idx_next); assert(m_clusters); assert(idx_next < m_clusterSize); assert(m_clusters[idx_next] == pNext); pResult = pNext; return 0; // success } if (cluster_size < 0) { // unknown size const long long payload_pos = pos; // absolute pos of cluster payload for (;;) { // determine cluster size if ((total >= 0) && (pos >= total)) break; if ((segment_stop >= 0) && (pos >= segment_stop)) break; // no more clusters if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; const long long id = ReadUInt(m_pReader, idpos, len); if (id < 0) // error (or underflow) return static_cast<long>(id); if (id == 0x0F43B675) // Cluster ID break; if (id == 0x0C53BB6B) // Cues ID break; pos += len; // consume ID (of sub-element) if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume size field of element if (size == 0) // weird continue; const long long unknown_size = (1LL << (7 * len)) - 1; if (size == unknown_size) return E_FILE_FORMAT_INVALID; // not allowed for sub-elements if ((segment_stop >= 0) && ((pos + size) > segment_stop)) // weird return E_FILE_FORMAT_INVALID; pos += size; // consume payload of sub-element assert((segment_stop < 0) || (pos <= segment_stop)); } // determine cluster size cluster_size = pos - payload_pos; assert(cluster_size >= 0); // TODO: handle cluster_size = 0 pos = payload_pos; // reset and re-parse original cluster } pos += cluster_size; // consume payload assert((segment_stop < 0) || (pos <= segment_stop)); return 2; // try to find a cluster that follows next } Commit Message: external/libvpx/libwebm: Update snapshot Update libwebm snapshot. This update contains security fixes from upstream. Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b BUG=23167726 Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207 (cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a) CWE ID: CWE-20
long Segment::DoParseNext(const Cluster*& pResult, long long& pos, long& len) { long long total, avail; long status = m_pReader->Length(&total, &avail); if (status < 0) // error return status; assert((total < 0) || (avail <= total)); const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size; long long off_next = 0; long long cluster_size = -1; for (;;) { if ((total >= 0) && (pos >= total)) return 1; // EOF if ((segment_stop >= 0) && (pos >= segment_stop)) return 1; // EOF if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; // absolute const long long idoff = pos - m_start; // relative const long long id = ReadUInt(m_pReader, idpos, len); // absolute if (id < 0) // error return static_cast<long>(id); if (id == 0) // weird return -1; // generic error pos += len; // consume ID if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume length of size of element if (size == 0) // weird continue; const long long unknown_size = (1LL << (7 * len)) - 1; if ((segment_stop >= 0) && (size != unknown_size) && ((pos + size) > segment_stop)) { return E_FILE_FORMAT_INVALID; } if (id == 0x0C53BB6B) { // Cues ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; const long long element_stop = pos + size; if ((segment_stop >= 0) && (element_stop > segment_stop)) return E_FILE_FORMAT_INVALID; const long long element_start = idpos; const long long element_size = element_stop - element_start; if (m_pCues == NULL) { m_pCues = new (std::nothrow) Cues(this, pos, size, element_start, element_size); if (m_pCues == NULL) return false; } pos += size; // consume payload if (segment_stop >= 0 && pos > segment_stop) return E_FILE_FORMAT_INVALID; continue; } if (id != 0x0F43B675) { // not a Cluster ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; pos += size; // consume payload if (segment_stop >= 0 && pos > segment_stop) return E_FILE_FORMAT_INVALID; continue; } off_next = idoff; if (size != unknown_size) cluster_size = size; break; } assert(off_next > 0); // have cluster Cluster** const ii = m_clusters + m_clusterCount; Cluster** i = ii; Cluster** const jj = ii + m_clusterPreloadCount; Cluster** j = jj; while (i < j) { Cluster** const k = i + (j - i) / 2; assert(k < jj); const Cluster* const pNext = *k; assert(pNext); assert(pNext->m_index < 0); pos = pNext->GetPosition(); assert(pos >= 0); if (pos < off_next) i = k + 1; else if (pos > off_next) j = k; else { pResult = pNext; return 0; // success } } assert(i == j); long long pos_; long len_; status = Cluster::HasBlockEntries(this, off_next, pos_, len_); if (status < 0) { // error or underflow pos = pos_; len = len_; return status; } if (status > 0) { // means "found at least one block entry" Cluster* const pNext = Cluster::Create(this, -1, // preloaded off_next); if (pNext == NULL) return 
-1; const ptrdiff_t idx_next = i - m_clusters; // insertion position if (!PreloadCluster(pNext, idx_next)) { delete pNext; return -1; } assert(m_clusters); assert(idx_next < m_clusterSize); assert(m_clusters[idx_next] == pNext); pResult = pNext; return 0; // success } if (cluster_size < 0) { // unknown size const long long payload_pos = pos; // absolute pos of cluster payload for (;;) { // determine cluster size if ((total >= 0) && (pos >= total)) break; if ((segment_stop >= 0) && (pos >= segment_stop)) break; // no more clusters if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long idpos = pos; const long long id = ReadUInt(m_pReader, idpos, len); if (id < 0) // error (or underflow) return static_cast<long>(id); if (id == 0x0F43B675) // Cluster ID break; if (id == 0x0C53BB6B) // Cues ID break; pos += len; // consume ID (of sub-element) if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume size field of element if (size == 0) // weird continue; const long long unknown_size = (1LL << (7 * len)) - 1; if (size == unknown_size) return E_FILE_FORMAT_INVALID; // not allowed for sub-elements if ((segment_stop >= 0) && ((pos + size) > segment_stop)) // weird return E_FILE_FORMAT_INVALID; pos += size; // consume payload of sub-element if (segment_stop >= 0 && pos > segment_stop) return E_FILE_FORMAT_INVALID; } // determine cluster size cluster_size = pos - payload_pos; assert(cluster_size >= 0); // TODO: handle cluster_size = 0 pos = payload_pos; // reset and re-parse original cluster } pos += cluster_size; // consume payload if (segment_stop >= 0 && pos > segment_stop) return E_FILE_FORMAT_INVALID; return 2; // try to find a cluster that follows next }
173,810
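The libwebm patch above (CWE-20) swaps assert()-style trust in header fields for explicit bounds checks and nothrow allocation checks. A minimal standalone C sketch of that pattern — not libwebm code; names and error codes are hypothetical — looks like this:

/* Minimal sketch (not libwebm code): validate untrusted sizes instead of
 * assert()ing, and check allocations before use. Names are hypothetical. */
#include <stdlib.h>
#include <string.h>

#define E_FILE_FORMAT_INVALID (-2)
#define E_ALLOC_FAILED        (-3)

/* Parse one length-prefixed element from an in-memory buffer. */
static long parse_element(const unsigned char *buf, size_t avail,
                          size_t pos, size_t size, unsigned char **out)
{
    /* Reject anything the header claims that the stream cannot back up;
     * an assert() here would be compiled out in release builds. */
    if (pos > avail || size > avail - pos)
        return E_FILE_FORMAT_INVALID;

    unsigned char *copy = malloc(size ? size : 1);
    if (copy == NULL)              /* mirrors new (std::nothrow) + NULL check */
        return E_ALLOC_FAILED;

    memcpy(copy, buf + pos, size);
    *out = copy;
    return 0;
}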
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: png_info_init_3(png_infopp ptr_ptr, png_size_t png_info_struct_size) { png_infop info_ptr = *ptr_ptr; png_debug(1, "in png_info_init_3"); if (info_ptr == NULL) return; if (png_sizeof(png_info) > png_info_struct_size) { png_destroy_struct(info_ptr); info_ptr = (png_infop)png_create_struct(PNG_STRUCT_INFO); *ptr_ptr = info_ptr; } /* Set everything to 0 */ png_memset(info_ptr, 0, png_sizeof(png_info)); } Commit Message: third_party/libpng: update to 1.2.54 [email protected] BUG=560291 Review URL: https://codereview.chromium.org/1467263003 Cr-Commit-Position: refs/heads/master@{#362298} CWE ID: CWE-119
png_info_init_3(png_infopp ptr_ptr, png_size_t png_info_struct_size) { png_infop info_ptr = *ptr_ptr; png_debug(1, "in png_info_init_3"); if (info_ptr == NULL) return; if (png_sizeof(png_info) > png_info_struct_size) { png_destroy_struct(info_ptr); info_ptr = (png_infop)png_create_struct(PNG_STRUCT_INFO); *ptr_ptr = info_ptr; if (info_ptr == NULL) return; } /* Set everything to 0 */ png_memset(info_ptr, 0, png_sizeof(png_info)); }
172,163
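The png_info_init_3 fix reduces to one rule: when a helper may replace the allocation it was handed, check the new pointer before zeroing it. A minimal C sketch of the same pattern — hypothetical names, no libpng types — could look like:

/* Minimal sketch (not libpng code): when a helper reallocates the object it
 * was handed, the result must be checked before it is zeroed. Names are
 * hypothetical. */
#include <stdlib.h>
#include <string.h>

struct info { size_t size; char payload[64]; };

static void info_init(struct info **ptr_ptr, size_t caller_struct_size)
{
    struct info *info = *ptr_ptr;

    if (info == NULL)
        return;

    /* Caller was built against a smaller struct: replace the allocation. */
    if (sizeof(*info) > caller_struct_size) {
        free(info);
        info = malloc(sizeof(*info));
        *ptr_ptr = info;
        if (info == NULL)   /* the missing check the CVE fix adds */
            return;
    }

    memset(info, 0, sizeof(*info));
}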
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void SerializerMarkupAccumulator::appendText(StringBuilder& result, Text* text) { Element* parent = text->parentElement(); if (parent && !shouldIgnoreElement(parent)) MarkupAccumulator::appendText(result, text); } Commit Message: Revert 162155 "This review merges the two existing page serializ..." Change r162155 broke the world even though it was landed using the CQ. > This review merges the two existing page serializers, WebPageSerializerImpl and > PageSerializer, into one, PageSerializer. In addition to this it moves all > the old tests from WebPageNewSerializerTest and WebPageSerializerTest to the > PageSerializerTest structure and splits out one test for MHTML into a new > MHTMLTest file. > > Saving as 'Webpage, Complete', 'Webpage, HTML Only' and as MHTML when the > 'Save Page as MHTML' flag is enabled now uses the same code, and should thus > have the same feature set. Meaning that both modes now should be a bit better. > > Detailed list of changes: > > - PageSerializerTest: Prepare for more DTD test > - PageSerializerTest: Remove now unneccesary input image test > - PageSerializerTest: Remove unused WebPageSerializer/Impl code > - PageSerializerTest: Move data URI morph test > - PageSerializerTest: Move data URI test > - PageSerializerTest: Move namespace test > - PageSerializerTest: Move SVG Image test > - MHTMLTest: Move MHTML specific test to own test file > - PageSerializerTest: Delete duplicate XML header test > - PageSerializerTest: Move blank frame test > - PageSerializerTest: Move CSS test > - PageSerializerTest: Add frameset/frame test > - PageSerializerTest: Move old iframe test > - PageSerializerTest: Move old elements test > - Use PageSerizer for saving web pages > - PageSerializerTest: Test for rewriting links > - PageSerializer: Add rewrite link accumulator > - PageSerializer: Serialize images in iframes/frames src > - PageSerializer: XHTML fix for meta tags > - PageSerializer: Add presentation CSS > - PageSerializer: Rename out parameter > > BUG= > [email protected] > > Review URL: https://codereview.chromium.org/68613003 [email protected] Review URL: https://codereview.chromium.org/73673003 git-svn-id: svn://svn.chromium.org/blink/trunk@162156 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
void SerializerMarkupAccumulator::appendText(StringBuilder& out, Text* text) { Element* parent = text->parentElement(); if (parent && !shouldIgnoreElement(parent)) MarkupAccumulator::appendText(out, text); }
171,569
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int ssl3_get_record(SSL *s) { int ssl_major, ssl_minor, al; int enc_err, n, i, ret = -1; SSL3_RECORD *rr; SSL3_BUFFER *rbuf; SSL_SESSION *sess; unsigned char *p; unsigned char md[EVP_MAX_MD_SIZE]; short version; unsigned mac_size; unsigned int num_recs = 0; unsigned int max_recs; unsigned int j; rr = RECORD_LAYER_get_rrec(&s->rlayer); rbuf = RECORD_LAYER_get_rbuf(&s->rlayer); max_recs = s->max_pipelines; if (max_recs == 0) max_recs = 1; sess = s->session; do { /* check if we have the header */ if ((RECORD_LAYER_get_rstate(&s->rlayer) != SSL_ST_READ_BODY) || (RECORD_LAYER_get_packet_length(&s->rlayer) < SSL3_RT_HEADER_LENGTH)) { n = ssl3_read_n(s, SSL3_RT_HEADER_LENGTH, SSL3_BUFFER_get_len(rbuf), 0, num_recs == 0 ? 1 : 0); if (n <= 0) return (n); /* error or non-blocking */ RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_BODY); p = RECORD_LAYER_get_packet(&s->rlayer); /* * The first record received by the server may be a V2ClientHello. */ if (s->server && RECORD_LAYER_is_first_record(&s->rlayer) && (p[0] & 0x80) && (p[2] == SSL2_MT_CLIENT_HELLO)) { /* * SSLv2 style record * * |num_recs| here will actually always be 0 because * |num_recs > 0| only ever occurs when we are processing * multiple app data records - which we know isn't the case here * because it is an SSLv2ClientHello. We keep it using * |num_recs| for the sake of consistency */ rr[num_recs].type = SSL3_RT_HANDSHAKE; rr[num_recs].rec_version = SSL2_VERSION; rr[num_recs].length = ((p[0] & 0x7f) << 8) | p[1]; if (rr[num_recs].length > SSL3_BUFFER_get_len(rbuf) - SSL2_RT_HEADER_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_PACKET_LENGTH_TOO_LONG); goto f_err; } if (rr[num_recs].length < MIN_SSL2_RECORD_LEN) { al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_LENGTH_TOO_SHORT); goto f_err; } } else { /* SSLv3+ style record */ if (s->msg_callback) s->msg_callback(0, 0, SSL3_RT_HEADER, p, 5, s, s->msg_callback_arg); /* Pull apart the header into the SSL3_RECORD */ rr[num_recs].type = *(p++); ssl_major = *(p++); ssl_minor = *(p++); version = (ssl_major << 8) | ssl_minor; rr[num_recs].rec_version = version; n2s(p, rr[num_recs].length); /* Lets check version */ if (!s->first_packet && version != s->version) { SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_WRONG_VERSION_NUMBER); if ((s->version & 0xFF00) == (version & 0xFF00) && !s->enc_write_ctx && !s->write_hash) { if (rr->type == SSL3_RT_ALERT) { /* * The record is using an incorrect version number, * but what we've got appears to be an alert. We * haven't read the body yet to check whether its a * fatal or not - but chances are it is. We probably * shouldn't send a fatal alert back. We'll just * end. */ goto err; } /* * Send back error using their minor version number :-) */ s->version = (unsigned short)version; } al = SSL_AD_PROTOCOL_VERSION; goto f_err; } if ((version >> 8) != SSL3_VERSION_MAJOR) { if (RECORD_LAYER_is_first_record(&s->rlayer)) { /* Go back to start of packet, look at the five bytes * that we have. 
*/ p = RECORD_LAYER_get_packet(&s->rlayer); if (strncmp((char *)p, "GET ", 4) == 0 || strncmp((char *)p, "POST ", 5) == 0 || strncmp((char *)p, "HEAD ", 5) == 0 || strncmp((char *)p, "PUT ", 4) == 0) { SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_HTTP_REQUEST); goto err; } else if (strncmp((char *)p, "CONNE", 5) == 0) { SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_HTTPS_PROXY_REQUEST); goto err; } /* Doesn't look like TLS - don't send an alert */ SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_WRONG_VERSION_NUMBER); goto err; } else { SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_WRONG_VERSION_NUMBER); al = SSL_AD_PROTOCOL_VERSION; goto f_err; } } if (rr[num_recs].length > SSL3_BUFFER_get_len(rbuf) - SSL3_RT_HEADER_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_PACKET_LENGTH_TOO_LONG); goto f_err; } } /* now s->rlayer.rstate == SSL_ST_READ_BODY */ } /* * s->rlayer.rstate == SSL_ST_READ_BODY, get and decode the data. * Calculate how much more data we need to read for the rest of the * record */ if (rr[num_recs].rec_version == SSL2_VERSION) { i = rr[num_recs].length + SSL2_RT_HEADER_LENGTH - SSL3_RT_HEADER_LENGTH; } else { i = rr[num_recs].length; } if (i > 0) { /* now s->packet_length == SSL3_RT_HEADER_LENGTH */ n = ssl3_read_n(s, i, i, 1, 0); if (n <= 0) return (n); /* error or non-blocking io */ } /* set state for later operations */ RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_HEADER); /* * At this point, s->packet_length == SSL3_RT_HEADER_LENGTH + rr->length, * or s->packet_length == SSL2_RT_HEADER_LENGTH + rr->length * and we have that many bytes in s->packet */ if (rr[num_recs].rec_version == SSL2_VERSION) { rr[num_recs].input = &(RECORD_LAYER_get_packet(&s->rlayer)[SSL2_RT_HEADER_LENGTH]); } else { rr[num_recs].input = &(RECORD_LAYER_get_packet(&s->rlayer)[SSL3_RT_HEADER_LENGTH]); } /* * ok, we can now read from 's->packet' data into 'rr' rr->input points * at rr->length bytes, which need to be copied into rr->data by either * the decryption or by the decompression When the data is 'copied' into * the rr->data buffer, rr->input will be pointed at the new buffer */ /* * We now have - encrypted [ MAC [ compressed [ plain ] ] ] rr->length * bytes of encrypted compressed stuff. */ /* check is not needed I believe */ if (rr[num_recs].length > SSL3_RT_MAX_ENCRYPTED_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_ENCRYPTED_LENGTH_TOO_LONG); goto f_err; } /* decrypt in place in 'rr->input' */ rr[num_recs].data = rr[num_recs].input; rr[num_recs].orig_len = rr[num_recs].length; /* Mark this record as not read by upper layers yet */ rr[num_recs].read = 0; num_recs++; /* we have pulled in a full packet so zero things */ RECORD_LAYER_reset_packet_length(&s->rlayer); RECORD_LAYER_clear_first_record(&s->rlayer); } while (num_recs < max_recs && rr[num_recs - 1].type == SSL3_RT_APPLICATION_DATA && SSL_USE_EXPLICIT_IV(s) && s->enc_read_ctx != NULL && (EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(s->enc_read_ctx)) & EVP_CIPH_FLAG_PIPELINE) && ssl3_record_app_data_waiting(s)); /* * If in encrypt-then-mac mode calculate mac from encrypted record. All * the details below are public so no timing details can leak. 
*/ if (SSL_USE_ETM(s) && s->read_hash) { unsigned char *mac; mac_size = EVP_MD_CTX_size(s->read_hash); OPENSSL_assert(mac_size <= EVP_MAX_MD_SIZE); for (j = 0; j < num_recs; j++) { if (rr[j].length < mac_size) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_LENGTH_TOO_SHORT); goto f_err; } rr[j].length -= mac_size; mac = rr[j].data + rr[j].length; i = s->method->ssl3_enc->mac(s, &rr[j], md, 0 /* not send */ ); if (i < 0 || CRYPTO_memcmp(md, mac, (size_t)mac_size) != 0) { al = SSL_AD_BAD_RECORD_MAC; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC); goto f_err; } } } enc_err = s->method->ssl3_enc->enc(s, rr, num_recs, 0); /*- * enc_err is: * 0: (in non-constant time) if the record is publically invalid. * 1: if the padding is valid * -1: if the padding is invalid */ if (enc_err == 0) { al = SSL_AD_DECRYPTION_FAILED; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_BLOCK_CIPHER_PAD_IS_WRONG); goto f_err; } #ifdef SSL_DEBUG printf("dec %d\n", rr->length); { unsigned int z; for (z = 0; z < rr->length; z++) printf("%02X%c", rr->data[z], ((z + 1) % 16) ? ' ' : '\n'); } printf("\n"); #endif /* r->length is now the compressed data plus mac */ if ((sess != NULL) && (s->enc_read_ctx != NULL) && (EVP_MD_CTX_md(s->read_hash) != NULL) && !SSL_USE_ETM(s)) { /* s->read_hash != NULL => mac_size != -1 */ unsigned char *mac = NULL; unsigned char mac_tmp[EVP_MAX_MD_SIZE]; mac_size = EVP_MD_CTX_size(s->read_hash); OPENSSL_assert(mac_size <= EVP_MAX_MD_SIZE); for (j = 0; j < num_recs; j++) { /* * orig_len is the length of the record before any padding was * removed. This is public information, as is the MAC in use, * therefore we can safely process the record in a different amount * of time if it's too short to possibly contain a MAC. */ if (rr[j].orig_len < mac_size || /* CBC records must have a padding length byte too. */ (EVP_CIPHER_CTX_mode(s->enc_read_ctx) == EVP_CIPH_CBC_MODE && rr[j].orig_len < mac_size + 1)) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_LENGTH_TOO_SHORT); goto f_err; } if (EVP_CIPHER_CTX_mode(s->enc_read_ctx) == EVP_CIPH_CBC_MODE) { /* * We update the length so that the TLS header bytes can be * constructed correctly but we need to extract the MAC in * constant time from within the record, without leaking the * contents of the padding bytes. */ mac = mac_tmp; ssl3_cbc_copy_mac(mac_tmp, &rr[j], mac_size); rr[j].length -= mac_size; } else { /* * In this case there's no padding, so |rec->orig_len| equals * |rec->length| and we checked that there's enough bytes for * |mac_size| above. */ rr[j].length -= mac_size; mac = &rr[j].data[rr[j].length]; } i = s->method->ssl3_enc->mac(s, &rr[j], md, 0 /* not send */ ); if (i < 0 || mac == NULL || CRYPTO_memcmp(md, mac, (size_t)mac_size) != 0) enc_err = -1; if (rr->length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_size) enc_err = -1; } } if (enc_err < 0) { /* * A separate 'decryption_failed' alert was introduced with TLS 1.0, * SSL 3.0 only has 'bad_record_mac'. But unless a decryption * failure is directly visible from the ciphertext anyway, we should * not reveal which kind of error occurred -- this might become * visible to an attacker (e.g. 
via a logfile) */ al = SSL_AD_BAD_RECORD_MAC; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC); goto f_err; } for (j = 0; j < num_recs; j++) { /* rr[j].length is now just compressed */ if (s->expand != NULL) { if (rr[j].length > SSL3_RT_MAX_COMPRESSED_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_COMPRESSED_LENGTH_TOO_LONG); goto f_err; } if (!ssl3_do_uncompress(s, &rr[j])) { al = SSL_AD_DECOMPRESSION_FAILURE; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_BAD_DECOMPRESSION); goto f_err; } } if (rr[j].length > SSL3_RT_MAX_PLAIN_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_DATA_LENGTH_TOO_LONG); goto f_err; } rr[j].off = 0; /*- * So at this point the following is true * rr[j].type is the type of record * rr[j].length == number of bytes in record * rr[j].off == offset to first valid byte * rr[j].data == where to take bytes from, increment after use :-). */ /* just read a 0 length packet */ if (rr[j].length == 0) { RECORD_LAYER_inc_empty_record_count(&s->rlayer); if (RECORD_LAYER_get_empty_record_count(&s->rlayer) > MAX_EMPTY_RECORDS) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_RECORD_TOO_SMALL); goto f_err; } } else { RECORD_LAYER_reset_empty_record_count(&s->rlayer); } } RECORD_LAYER_set_numrpipes(&s->rlayer, num_recs); return 1; f_err: ssl3_send_alert(s, SSL3_AL_FATAL, al); err: return ret; } Commit Message: Don't change the state of the ETM flags until CCS processing Changing the ciphersuite during a renegotiation can result in a crash leading to a DoS attack. ETM has not been implemented in 1.1.0 for DTLS so this is TLS only. The problem is caused by changing the flag indicating whether to use ETM or not immediately on negotiation of ETM, rather than at CCS. Therefore, during a renegotiation, if the ETM state is changing (usually due to a change of ciphersuite), then an error/crash will occur. Due to the fact that there are separate CCS messages for read and write we actually now need two flags to determine whether to use ETM or not. CVE-2017-3733 Reviewed-by: Richard Levitte <[email protected]> CWE ID: CWE-20
int ssl3_get_record(SSL *s) { int ssl_major, ssl_minor, al; int enc_err, n, i, ret = -1; SSL3_RECORD *rr; SSL3_BUFFER *rbuf; SSL_SESSION *sess; unsigned char *p; unsigned char md[EVP_MAX_MD_SIZE]; short version; unsigned mac_size; unsigned int num_recs = 0; unsigned int max_recs; unsigned int j; rr = RECORD_LAYER_get_rrec(&s->rlayer); rbuf = RECORD_LAYER_get_rbuf(&s->rlayer); max_recs = s->max_pipelines; if (max_recs == 0) max_recs = 1; sess = s->session; do { /* check if we have the header */ if ((RECORD_LAYER_get_rstate(&s->rlayer) != SSL_ST_READ_BODY) || (RECORD_LAYER_get_packet_length(&s->rlayer) < SSL3_RT_HEADER_LENGTH)) { n = ssl3_read_n(s, SSL3_RT_HEADER_LENGTH, SSL3_BUFFER_get_len(rbuf), 0, num_recs == 0 ? 1 : 0); if (n <= 0) return (n); /* error or non-blocking */ RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_BODY); p = RECORD_LAYER_get_packet(&s->rlayer); /* * The first record received by the server may be a V2ClientHello. */ if (s->server && RECORD_LAYER_is_first_record(&s->rlayer) && (p[0] & 0x80) && (p[2] == SSL2_MT_CLIENT_HELLO)) { /* * SSLv2 style record * * |num_recs| here will actually always be 0 because * |num_recs > 0| only ever occurs when we are processing * multiple app data records - which we know isn't the case here * because it is an SSLv2ClientHello. We keep it using * |num_recs| for the sake of consistency */ rr[num_recs].type = SSL3_RT_HANDSHAKE; rr[num_recs].rec_version = SSL2_VERSION; rr[num_recs].length = ((p[0] & 0x7f) << 8) | p[1]; if (rr[num_recs].length > SSL3_BUFFER_get_len(rbuf) - SSL2_RT_HEADER_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_PACKET_LENGTH_TOO_LONG); goto f_err; } if (rr[num_recs].length < MIN_SSL2_RECORD_LEN) { al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_LENGTH_TOO_SHORT); goto f_err; } } else { /* SSLv3+ style record */ if (s->msg_callback) s->msg_callback(0, 0, SSL3_RT_HEADER, p, 5, s, s->msg_callback_arg); /* Pull apart the header into the SSL3_RECORD */ rr[num_recs].type = *(p++); ssl_major = *(p++); ssl_minor = *(p++); version = (ssl_major << 8) | ssl_minor; rr[num_recs].rec_version = version; n2s(p, rr[num_recs].length); /* Lets check version */ if (!s->first_packet && version != s->version) { SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_WRONG_VERSION_NUMBER); if ((s->version & 0xFF00) == (version & 0xFF00) && !s->enc_write_ctx && !s->write_hash) { if (rr->type == SSL3_RT_ALERT) { /* * The record is using an incorrect version number, * but what we've got appears to be an alert. We * haven't read the body yet to check whether its a * fatal or not - but chances are it is. We probably * shouldn't send a fatal alert back. We'll just * end. */ goto err; } /* * Send back error using their minor version number :-) */ s->version = (unsigned short)version; } al = SSL_AD_PROTOCOL_VERSION; goto f_err; } if ((version >> 8) != SSL3_VERSION_MAJOR) { if (RECORD_LAYER_is_first_record(&s->rlayer)) { /* Go back to start of packet, look at the five bytes * that we have. 
*/ p = RECORD_LAYER_get_packet(&s->rlayer); if (strncmp((char *)p, "GET ", 4) == 0 || strncmp((char *)p, "POST ", 5) == 0 || strncmp((char *)p, "HEAD ", 5) == 0 || strncmp((char *)p, "PUT ", 4) == 0) { SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_HTTP_REQUEST); goto err; } else if (strncmp((char *)p, "CONNE", 5) == 0) { SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_HTTPS_PROXY_REQUEST); goto err; } /* Doesn't look like TLS - don't send an alert */ SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_WRONG_VERSION_NUMBER); goto err; } else { SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_WRONG_VERSION_NUMBER); al = SSL_AD_PROTOCOL_VERSION; goto f_err; } } if (rr[num_recs].length > SSL3_BUFFER_get_len(rbuf) - SSL3_RT_HEADER_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_PACKET_LENGTH_TOO_LONG); goto f_err; } } /* now s->rlayer.rstate == SSL_ST_READ_BODY */ } /* * s->rlayer.rstate == SSL_ST_READ_BODY, get and decode the data. * Calculate how much more data we need to read for the rest of the * record */ if (rr[num_recs].rec_version == SSL2_VERSION) { i = rr[num_recs].length + SSL2_RT_HEADER_LENGTH - SSL3_RT_HEADER_LENGTH; } else { i = rr[num_recs].length; } if (i > 0) { /* now s->packet_length == SSL3_RT_HEADER_LENGTH */ n = ssl3_read_n(s, i, i, 1, 0); if (n <= 0) return (n); /* error or non-blocking io */ } /* set state for later operations */ RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_HEADER); /* * At this point, s->packet_length == SSL3_RT_HEADER_LENGTH + rr->length, * or s->packet_length == SSL2_RT_HEADER_LENGTH + rr->length * and we have that many bytes in s->packet */ if (rr[num_recs].rec_version == SSL2_VERSION) { rr[num_recs].input = &(RECORD_LAYER_get_packet(&s->rlayer)[SSL2_RT_HEADER_LENGTH]); } else { rr[num_recs].input = &(RECORD_LAYER_get_packet(&s->rlayer)[SSL3_RT_HEADER_LENGTH]); } /* * ok, we can now read from 's->packet' data into 'rr' rr->input points * at rr->length bytes, which need to be copied into rr->data by either * the decryption or by the decompression When the data is 'copied' into * the rr->data buffer, rr->input will be pointed at the new buffer */ /* * We now have - encrypted [ MAC [ compressed [ plain ] ] ] rr->length * bytes of encrypted compressed stuff. */ /* check is not needed I believe */ if (rr[num_recs].length > SSL3_RT_MAX_ENCRYPTED_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_ENCRYPTED_LENGTH_TOO_LONG); goto f_err; } /* decrypt in place in 'rr->input' */ rr[num_recs].data = rr[num_recs].input; rr[num_recs].orig_len = rr[num_recs].length; /* Mark this record as not read by upper layers yet */ rr[num_recs].read = 0; num_recs++; /* we have pulled in a full packet so zero things */ RECORD_LAYER_reset_packet_length(&s->rlayer); RECORD_LAYER_clear_first_record(&s->rlayer); } while (num_recs < max_recs && rr[num_recs - 1].type == SSL3_RT_APPLICATION_DATA && SSL_USE_EXPLICIT_IV(s) && s->enc_read_ctx != NULL && (EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(s->enc_read_ctx)) & EVP_CIPH_FLAG_PIPELINE) && ssl3_record_app_data_waiting(s)); /* * If in encrypt-then-mac mode calculate mac from encrypted record. All * the details below are public so no timing details can leak. 
*/ if (SSL_READ_ETM(s) && s->read_hash) { unsigned char *mac; mac_size = EVP_MD_CTX_size(s->read_hash); OPENSSL_assert(mac_size <= EVP_MAX_MD_SIZE); for (j = 0; j < num_recs; j++) { if (rr[j].length < mac_size) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_LENGTH_TOO_SHORT); goto f_err; } rr[j].length -= mac_size; mac = rr[j].data + rr[j].length; i = s->method->ssl3_enc->mac(s, &rr[j], md, 0 /* not send */ ); if (i < 0 || CRYPTO_memcmp(md, mac, (size_t)mac_size) != 0) { al = SSL_AD_BAD_RECORD_MAC; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC); goto f_err; } } } enc_err = s->method->ssl3_enc->enc(s, rr, num_recs, 0); /*- * enc_err is: * 0: (in non-constant time) if the record is publically invalid. * 1: if the padding is valid * -1: if the padding is invalid */ if (enc_err == 0) { al = SSL_AD_DECRYPTION_FAILED; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_BLOCK_CIPHER_PAD_IS_WRONG); goto f_err; } #ifdef SSL_DEBUG printf("dec %d\n", rr->length); { unsigned int z; for (z = 0; z < rr->length; z++) printf("%02X%c", rr->data[z], ((z + 1) % 16) ? ' ' : '\n'); } printf("\n"); #endif /* r->length is now the compressed data plus mac */ if ((sess != NULL) && (s->enc_read_ctx != NULL) && (!SSL_READ_ETM(s) && EVP_MD_CTX_md(s->read_hash) != NULL)) { /* s->read_hash != NULL => mac_size != -1 */ unsigned char *mac = NULL; unsigned char mac_tmp[EVP_MAX_MD_SIZE]; mac_size = EVP_MD_CTX_size(s->read_hash); OPENSSL_assert(mac_size <= EVP_MAX_MD_SIZE); for (j = 0; j < num_recs; j++) { /* * orig_len is the length of the record before any padding was * removed. This is public information, as is the MAC in use, * therefore we can safely process the record in a different amount * of time if it's too short to possibly contain a MAC. */ if (rr[j].orig_len < mac_size || /* CBC records must have a padding length byte too. */ (EVP_CIPHER_CTX_mode(s->enc_read_ctx) == EVP_CIPH_CBC_MODE && rr[j].orig_len < mac_size + 1)) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_LENGTH_TOO_SHORT); goto f_err; } if (EVP_CIPHER_CTX_mode(s->enc_read_ctx) == EVP_CIPH_CBC_MODE) { /* * We update the length so that the TLS header bytes can be * constructed correctly but we need to extract the MAC in * constant time from within the record, without leaking the * contents of the padding bytes. */ mac = mac_tmp; ssl3_cbc_copy_mac(mac_tmp, &rr[j], mac_size); rr[j].length -= mac_size; } else { /* * In this case there's no padding, so |rec->orig_len| equals * |rec->length| and we checked that there's enough bytes for * |mac_size| above. */ rr[j].length -= mac_size; mac = &rr[j].data[rr[j].length]; } i = s->method->ssl3_enc->mac(s, &rr[j], md, 0 /* not send */ ); if (i < 0 || mac == NULL || CRYPTO_memcmp(md, mac, (size_t)mac_size) != 0) enc_err = -1; if (rr->length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_size) enc_err = -1; } } if (enc_err < 0) { /* * A separate 'decryption_failed' alert was introduced with TLS 1.0, * SSL 3.0 only has 'bad_record_mac'. But unless a decryption * failure is directly visible from the ciphertext anyway, we should * not reveal which kind of error occurred -- this might become * visible to an attacker (e.g. 
via a logfile) */ al = SSL_AD_BAD_RECORD_MAC; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC); goto f_err; } for (j = 0; j < num_recs; j++) { /* rr[j].length is now just compressed */ if (s->expand != NULL) { if (rr[j].length > SSL3_RT_MAX_COMPRESSED_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_COMPRESSED_LENGTH_TOO_LONG); goto f_err; } if (!ssl3_do_uncompress(s, &rr[j])) { al = SSL_AD_DECOMPRESSION_FAILURE; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_BAD_DECOMPRESSION); goto f_err; } } if (rr[j].length > SSL3_RT_MAX_PLAIN_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_DATA_LENGTH_TOO_LONG); goto f_err; } rr[j].off = 0; /*- * So at this point the following is true * rr[j].type is the type of record * rr[j].length == number of bytes in record * rr[j].off == offset to first valid byte * rr[j].data == where to take bytes from, increment after use :-). */ /* just read a 0 length packet */ if (rr[j].length == 0) { RECORD_LAYER_inc_empty_record_count(&s->rlayer); if (RECORD_LAYER_get_empty_record_count(&s->rlayer) > MAX_EMPTY_RECORDS) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_RECORD_TOO_SMALL); goto f_err; } } else { RECORD_LAYER_reset_empty_record_count(&s->rlayer); } } RECORD_LAYER_set_numrpipes(&s->rlayer, num_recs); return 1; f_err: ssl3_send_alert(s, SSL3_AL_FATAL, al); err: return ret; }
168,422
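The commit message above carries the real lesson: the encrypt-then-MAC decision made during (re)negotiation must not flip the live record-layer state, and the read and write directions each need their own flag, latched only when the matching ChangeCipherSpec is processed. A minimal C sketch of that state split — hypothetical names, not OpenSSL's API — might look like:

/* Minimal sketch (not OpenSSL code): keep the pending "encrypt-then-MAC"
 * choice from the handshake separate from the flags the record layer
 * actually consults, and only latch it when the matching CCS is processed.
 * All names are hypothetical. */
#include <stdbool.h>

struct conn {
    bool etm_negotiated;   /* set while parsing the hello extensions    */
    bool read_etm;         /* consulted when decrypting inbound records  */
    bool write_etm;        /* consulted when encrypting outbound records */
};

/* Called when the extension is (re)negotiated; must not touch the live
 * record-layer flags, or an in-flight renegotiation corrupts them. */
static void on_etm_negotiated(struct conn *c, bool enabled)
{
    c->etm_negotiated = enabled;
}

/* Each direction switches independently at its own ChangeCipherSpec. */
static void on_ccs_received(struct conn *c) { c->read_etm  = c->etm_negotiated; }
static void on_ccs_sent(struct conn *c)     { c->write_etm = c->etm_negotiated; }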
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) { struct sctp_association *asoc = sctp_id2assoc(sk, id); struct sctp_sock *sp = sctp_sk(sk); struct socket *sock; int err = 0; if (!asoc) return -EINVAL; /* If there is a thread waiting on more sndbuf space for * sending on this asoc, it cannot be peeled. */ if (waitqueue_active(&asoc->wait)) return -EBUSY; /* An association cannot be branched off from an already peeled-off * socket, nor is this supported for tcp style sockets. */ if (!sctp_style(sk, UDP)) return -EINVAL; /* Create a new socket. */ err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); if (err < 0) return err; sctp_copy_sock(sock->sk, sk, asoc); /* Make peeled-off sockets more like 1-1 accepted sockets. * Set the daddr and initialize id to something more random */ sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); *sockp = sock; return err; } Commit Message: sctp: do not peel off an assoc from one netns to another one Now when peeling off an association to the sock in another netns, all transports in this assoc are not to be rehashed and keep use the old key in hashtable. As a transport uses sk->net as the hash key to insert into hashtable, it would miss removing these transports from hashtable due to the new netns when closing the sock and all transports are being freeed, then later an use-after-free issue could be caused when looking up an asoc and dereferencing those transports. This is a very old issue since very beginning, ChunYu found it with syzkaller fuzz testing with this series: socket$inet6_sctp() bind$inet6() sendto$inet6() unshare(0x40000000) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST() getsockopt$inet_sctp6_SCTP_SOCKOPT_PEELOFF() This patch is to block this call when peeling one assoc off from one netns to another one, so that the netns of all transport would not go out-sync with the key in hashtable. Note that this patch didn't fix it by rehashing transports, as it's difficult to handle the situation when the tuple is already in use in the new netns. Besides, no one would like to peel off one assoc to another netns, considering ipaddrs, ifaces, etc. are usually different. Reported-by: ChunYu Wang <[email protected]> Signed-off-by: Xin Long <[email protected]> Acked-by: Marcelo Ricardo Leitner <[email protected]> Acked-by: Neil Horman <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-416
int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) { struct sctp_association *asoc = sctp_id2assoc(sk, id); struct sctp_sock *sp = sctp_sk(sk); struct socket *sock; int err = 0; /* Do not peel off from one netns to another one. */ if (!net_eq(current->nsproxy->net_ns, sock_net(sk))) return -EINVAL; if (!asoc) return -EINVAL; /* If there is a thread waiting on more sndbuf space for * sending on this asoc, it cannot be peeled. */ if (waitqueue_active(&asoc->wait)) return -EBUSY; /* An association cannot be branched off from an already peeled-off * socket, nor is this supported for tcp style sockets. */ if (!sctp_style(sk, UDP)) return -EINVAL; /* Create a new socket. */ err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); if (err < 0) return err; sctp_copy_sock(sock->sk, sk, asoc); /* Make peeled-off sockets more like 1-1 accepted sockets. * Set the daddr and initialize id to something more random */ sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); *sockp = sock; return err; }
167,736
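The sctp fix is a single guard, but the reasoning generalises: an object whose hash key was derived from the namespace it was inserted under cannot simply be handed to another namespace without rehashing, so the cheapest safe fix is to refuse the cross-namespace request up front. A small C sketch of the guard — hypothetical names, no kernel types — could be:

/* Minimal sketch (not kernel code): an object hashed under the namespace it
 * was created in must not be handed to another namespace without rehashing,
 * so the simplest safe fix is to refuse the cross-namespace request.
 * Names are hypothetical. */
#include <errno.h>
#include <stdbool.h>

struct ns { int id; };
struct assoc { struct ns *hash_ns; /* key material used at insert time */ };

static bool same_ns(const struct ns *a, const struct ns *b)
{
    return a->id == b->id;
}

static int peel_off(struct assoc *asoc, struct ns *current_ns)
{
    /* Without this guard the entry stays keyed under the old namespace and
     * is never removed at teardown, leaving a dangling pointer behind. */
    if (!same_ns(current_ns, asoc->hash_ns))
        return -EINVAL;

    /* ... proceed with the peel-off in the same namespace ... */
    return 0;
}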
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void php_wddx_push_element(void *user_data, const XML_Char *name, const XML_Char **atts) { st_entry ent; wddx_stack *stack = (wddx_stack *)user_data; if (!strcmp(name, EL_PACKET)) { int i; if (atts) for (i=0; atts[i]; i++) { if (!strcmp(atts[i], EL_VERSION)) { /* nothing for now */ } } } else if (!strcmp(name, EL_STRING)) { ent.type = ST_STRING; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); Z_TYPE_P(ent.data) = IS_STRING; Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC(); Z_STRLEN_P(ent.data) = 0; wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_BINARY)) { ent.type = ST_BINARY; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); Z_TYPE_P(ent.data) = IS_STRING; Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC(); Z_STRLEN_P(ent.data) = 0; wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_CHAR)) { int i; if (atts) for (i = 0; atts[i]; i++) { if (!strcmp(atts[i], EL_CHAR_CODE) && atts[i+1] && atts[i+1][0]) { char tmp_buf[2]; snprintf(tmp_buf, sizeof(tmp_buf), "%c", (char)strtol(atts[i+1], NULL, 16)); php_wddx_process_data(user_data, tmp_buf, strlen(tmp_buf)); break; } } } else if (!strcmp(name, EL_NUMBER)) { ent.type = ST_NUMBER; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); Z_TYPE_P(ent.data) = IS_LONG; Z_LVAL_P(ent.data) = 0; wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_BOOLEAN)) { int i; if (atts) for (i = 0; atts[i]; i++) { if (!strcmp(atts[i], EL_VALUE) && atts[i+1] && atts[i+1][0]) { ent.type = ST_BOOLEAN; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); Z_TYPE_P(ent.data) = IS_BOOL; wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); php_wddx_process_data(user_data, atts[i+1], strlen(atts[i+1])); break; } } } else if (!strcmp(name, EL_NULL)) { ent.type = ST_NULL; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); ZVAL_NULL(ent.data); wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_ARRAY)) { ent.type = ST_ARRAY; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); array_init(ent.data); INIT_PZVAL(ent.data); wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_STRUCT)) { ent.type = ST_STRUCT; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); array_init(ent.data); INIT_PZVAL(ent.data); wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_VAR)) { int i; if (atts) for (i = 0; atts[i]; i++) { if (!strcmp(atts[i], EL_NAME) && atts[i+1] && atts[i+1][0]) { if (stack->varname) efree(stack->varname); stack->varname = estrdup(atts[i+1]); break; } } } else if (!strcmp(name, EL_RECORDSET)) { int i; ent.type = ST_RECORDSET; SET_STACK_VARNAME; MAKE_STD_ZVAL(ent.data); array_init(ent.data); if (atts) for (i = 0; atts[i]; i++) { if (!strcmp(atts[i], "fieldNames") && atts[i+1] && atts[i+1][0]) { zval *tmp; char *key; char *p1, *p2, *endp; i++; endp = (char *)atts[i] + strlen(atts[i]); p1 = (char *)atts[i]; while ((p2 = php_memnstr(p1, ",", sizeof(",")-1, endp)) != NULL) { key = estrndup(p1, p2 - p1); MAKE_STD_ZVAL(tmp); array_init(tmp); add_assoc_zval_ex(ent.data, key, p2 - p1 + 1, tmp); p1 = p2 + sizeof(",")-1; efree(key); } if (p1 <= endp) { MAKE_STD_ZVAL(tmp); array_init(tmp); add_assoc_zval_ex(ent.data, p1, endp - p1 + 1, tmp); } break; } } wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_FIELD)) { int i; st_entry ent; ent.type = ST_FIELD; ent.varname = NULL; ent.data = 
NULL; if (atts) for (i = 0; atts[i]; i++) { if (!strcmp(atts[i], EL_NAME) && atts[i+1] && atts[i+1][0]) { st_entry *recordset; zval **field; if (wddx_stack_top(stack, (void**)&recordset) == SUCCESS && recordset->type == ST_RECORDSET && zend_hash_find(Z_ARRVAL_P(recordset->data), (char*)atts[i+1], strlen(atts[i+1])+1, (void**)&field) == SUCCESS) { ent.data = *field; } break; } } wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_DATETIME)) { ent.type = ST_DATETIME; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); Z_TYPE_P(ent.data) = IS_LONG; wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } } Commit Message: Fix bug #73631 - Invalid read when wddx decodes empty boolean element CWE ID: CWE-125
static void php_wddx_push_element(void *user_data, const XML_Char *name, const XML_Char **atts) { st_entry ent; wddx_stack *stack = (wddx_stack *)user_data; if (!strcmp(name, EL_PACKET)) { int i; if (atts) for (i=0; atts[i]; i++) { if (!strcmp(atts[i], EL_VERSION)) { /* nothing for now */ } } } else if (!strcmp(name, EL_STRING)) { ent.type = ST_STRING; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); Z_TYPE_P(ent.data) = IS_STRING; Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC(); Z_STRLEN_P(ent.data) = 0; wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_BINARY)) { ent.type = ST_BINARY; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); Z_TYPE_P(ent.data) = IS_STRING; Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC(); Z_STRLEN_P(ent.data) = 0; wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_CHAR)) { int i; if (atts) for (i = 0; atts[i]; i++) { if (!strcmp(atts[i], EL_CHAR_CODE) && atts[i+1] && atts[i+1][0]) { char tmp_buf[2]; snprintf(tmp_buf, sizeof(tmp_buf), "%c", (char)strtol(atts[i+1], NULL, 16)); php_wddx_process_data(user_data, tmp_buf, strlen(tmp_buf)); break; } } } else if (!strcmp(name, EL_NUMBER)) { ent.type = ST_NUMBER; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); Z_TYPE_P(ent.data) = IS_LONG; Z_LVAL_P(ent.data) = 0; wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_BOOLEAN)) { int i; if (atts) for (i = 0; atts[i]; i++) { if (!strcmp(atts[i], EL_VALUE) && atts[i+1] && atts[i+1][0]) { ent.type = ST_BOOLEAN; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); Z_TYPE_P(ent.data) = IS_BOOL; wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); php_wddx_process_data(user_data, atts[i+1], strlen(atts[i+1])); break; } } else { ent.type = ST_BOOLEAN; SET_STACK_VARNAME; ZVAL_FALSE(&ent.data); wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } } else if (!strcmp(name, EL_NULL)) { ent.type = ST_NULL; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); ZVAL_NULL(ent.data); wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_ARRAY)) { ent.type = ST_ARRAY; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); array_init(ent.data); INIT_PZVAL(ent.data); wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_STRUCT)) { ent.type = ST_STRUCT; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); array_init(ent.data); INIT_PZVAL(ent.data); wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_VAR)) { int i; if (atts) for (i = 0; atts[i]; i++) { if (!strcmp(atts[i], EL_NAME) && atts[i+1] && atts[i+1][0]) { if (stack->varname) efree(stack->varname); stack->varname = estrdup(atts[i+1]); break; } } } else if (!strcmp(name, EL_RECORDSET)) { int i; ent.type = ST_RECORDSET; SET_STACK_VARNAME; MAKE_STD_ZVAL(ent.data); array_init(ent.data); if (atts) for (i = 0; atts[i]; i++) { if (!strcmp(atts[i], "fieldNames") && atts[i+1] && atts[i+1][0]) { zval *tmp; char *key; char *p1, *p2, *endp; i++; endp = (char *)atts[i] + strlen(atts[i]); p1 = (char *)atts[i]; while ((p2 = php_memnstr(p1, ",", sizeof(",")-1, endp)) != NULL) { key = estrndup(p1, p2 - p1); MAKE_STD_ZVAL(tmp); array_init(tmp); add_assoc_zval_ex(ent.data, key, p2 - p1 + 1, tmp); p1 = p2 + sizeof(",")-1; efree(key); } if (p1 <= endp) { MAKE_STD_ZVAL(tmp); array_init(tmp); add_assoc_zval_ex(ent.data, p1, endp - p1 + 1, tmp); } break; } } wddx_stack_push((wddx_stack *)stack, &ent, 
sizeof(st_entry)); } else if (!strcmp(name, EL_FIELD)) { int i; st_entry ent; ent.type = ST_FIELD; ent.varname = NULL; ent.data = NULL; if (atts) for (i = 0; atts[i]; i++) { if (!strcmp(atts[i], EL_NAME) && atts[i+1] && atts[i+1][0]) { st_entry *recordset; zval **field; if (wddx_stack_top(stack, (void**)&recordset) == SUCCESS && recordset->type == ST_RECORDSET && zend_hash_find(Z_ARRVAL_P(recordset->data), (char*)atts[i+1], strlen(atts[i+1])+1, (void**)&field) == SUCCESS) { ent.data = *field; } break; } } wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } else if (!strcmp(name, EL_DATETIME)) { ent.type = ST_DATETIME; SET_STACK_VARNAME; ALLOC_ZVAL(ent.data); INIT_PZVAL(ent.data); Z_TYPE_P(ent.data) = IS_LONG; wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry)); } }
168,667
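The wddx fix for the empty boolean element illustrates a common parser rule: if the attribute that normally supplies a value may legally be absent, push a fully initialised default entry rather than skipping the push and later reading state that was never set. A minimal C sketch of that rule — hypothetical names, none of PHP's zval machinery — might be:

/* Minimal sketch (not PHP code): when an element may arrive without the
 * attribute that normally supplies its value, the parser still pushes a
 * fully initialized entry (default false) instead of leaving the stack
 * state undefined. Names are hypothetical. */
#include <stdbool.h>
#include <stddef.h>

struct entry { int type; bool value; };

#define TYPE_BOOLEAN 1

static void push_boolean(struct entry *stack, size_t *top,
                         const char *value_attr /* may be NULL */)
{
    struct entry ent = { .type = TYPE_BOOLEAN, .value = false };

    if (value_attr != NULL)
        ent.value = (value_attr[0] == 't');   /* "true" vs anything else */
    /* else: keep the explicit default rather than leaving ent untouched */

    stack[(*top)++] = ent;
}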
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Image *ReadPICTImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define ThrowPICTException(exception,message) \ { \ if (tile_image != (Image *) NULL) \ tile_image=DestroyImage(tile_image); \ if (read_info != (ImageInfo *) NULL) \ read_info=DestroyImageInfo(read_info); \ ThrowReaderException((exception),(message)); \ } char geometry[MagickPathExtent], header_ole[4]; Image *image, *tile_image; ImageInfo *read_info; int c, code; MagickBooleanType jpeg, status; PICTRectangle frame; PICTPixmap pixmap; Quantum index; register Quantum *q; register ssize_t i, x; size_t extent, length; ssize_t count, flags, j, version, y; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read PICT header. */ read_info=(ImageInfo *) NULL; tile_image=(Image *) NULL; pixmap.bits_per_pixel=0; pixmap.component_count=0; /* Skip header : 512 for standard PICT and 4, ie "PICT" for OLE2. */ header_ole[0]=ReadBlobByte(image); header_ole[1]=ReadBlobByte(image); header_ole[2]=ReadBlobByte(image); header_ole[3]=ReadBlobByte(image); if (!((header_ole[0] == 0x50) && (header_ole[1] == 0x49) && (header_ole[2] == 0x43) && (header_ole[3] == 0x54 ))) for (i=0; i < 508; i++) if (ReadBlobByte(image) == EOF) break; (void) ReadBlobMSBShort(image); /* skip picture size */ if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); while ((c=ReadBlobByte(image)) == 0) ; if (c != 0x11) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); version=(ssize_t) ReadBlobByte(image); if (version == 2) { c=ReadBlobByte(image); if (c != 0xff) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); } else if (version != 1) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if ((frame.left < 0) || (frame.right < 0) || (frame.top < 0) || (frame.bottom < 0) || (frame.left >= frame.right) || (frame.top >= frame.bottom)) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); /* Create black canvas. */ flags=0; image->depth=8; image->columns=(size_t) (frame.right-frame.left); image->rows=(size_t) (frame.bottom-frame.top); image->resolution.x=DefaultResolution; image->resolution.y=DefaultResolution; image->units=UndefinedResolution; if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows,exception); if (status != MagickFalse) status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Interpret PICT opcodes. 
*/ jpeg=MagickFalse; for (code=0; EOFBlob(image) == MagickFalse; ) { if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; if ((version == 1) || ((TellBlob(image) % 2) != 0)) code=ReadBlobByte(image); if (version == 2) code=ReadBlobMSBSignedShort(image); if (code < 0) break; if (code == 0) continue; if (code > 0xa1) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"%04X:",code); } else { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %04X %s: %s",code,codes[code].name,codes[code].description); switch (code) { case 0x01: { /* Clipping rectangle. */ length=ReadBlobMSBShort(image); if (length != 0x000a) { for (i=0; i < (ssize_t) (length-2); i++) if (ReadBlobByte(image) == EOF) break; break; } if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if (((frame.left & 0x8000) != 0) || ((frame.top & 0x8000) != 0)) break; image->columns=(size_t) (frame.right-frame.left); image->rows=(size_t) (frame.bottom-frame.top); status=SetImageExtent(image,image->columns,image->rows,exception); if (status != MagickFalse) status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); break; } case 0x12: case 0x13: case 0x14: { ssize_t pattern; size_t height, width; /* Skip pattern definition. */ pattern=(ssize_t) ReadBlobMSBShort(image); for (i=0; i < 8; i++) if (ReadBlobByte(image) == EOF) break; if (pattern == 2) { for (i=0; i < 5; i++) if (ReadBlobByte(image) == EOF) break; break; } if (pattern != 1) ThrowPICTException(CorruptImageError,"UnknownPatternType"); length=ReadBlobMSBShort(image); if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if (ReadPixmap(image,&pixmap) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); image->depth=(size_t) pixmap.component_size; image->resolution.x=1.0*pixmap.horizontal_resolution; image->resolution.y=1.0*pixmap.vertical_resolution; image->units=PixelsPerInchResolution; (void) ReadBlobMSBLong(image); flags=(ssize_t) ReadBlobMSBShort(image); length=ReadBlobMSBShort(image); for (i=0; i <= (ssize_t) length; i++) (void) ReadBlobMSBLong(image); width=(size_t) (frame.bottom-frame.top); height=(size_t) (frame.right-frame.left); if (pixmap.bits_per_pixel <= 8) length&=0x7fff; if (pixmap.bits_per_pixel == 16) width<<=1; if (length == 0) length=width; if (length < 8) { for (i=0; i < (ssize_t) (length*height); i++) if (ReadBlobByte(image) == EOF) break; } else for (i=0; i < (ssize_t) height; i++) { if (EOFBlob(image) != MagickFalse) break; if (length > 200) { for (j=0; j < (ssize_t) ReadBlobMSBShort(image); j++) if (ReadBlobByte(image) == EOF) break; } else for (j=0; j < (ssize_t) ReadBlobByte(image); j++) if (ReadBlobByte(image) == EOF) break; } break; } case 0x1b: { /* Initialize image background color. */ image->background_color.red=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); image->background_color.green=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); image->background_color.blue=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); break; } case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: { /* Skip polygon or region. 
*/ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) (length-2); i++) if (ReadBlobByte(image) == EOF) break; break; } case 0x90: case 0x91: case 0x98: case 0x99: case 0x9a: case 0x9b: { PICTRectangle source, destination; register unsigned char *p; size_t j; ssize_t bytes_per_line; unsigned char *pixels; /* Pixmap clipped by a rectangle. */ bytes_per_line=0; if ((code != 0x9a) && (code != 0x9b)) bytes_per_line=(ssize_t) ReadBlobMSBShort(image); else { (void) ReadBlobMSBShort(image); (void) ReadBlobMSBShort(image); (void) ReadBlobMSBShort(image); } if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); /* Initialize tile image. */ tile_image=CloneImage(image,(size_t) (frame.right-frame.left), (size_t) (frame.bottom-frame.top),MagickTrue,exception); if (tile_image == (Image *) NULL) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if ((code == 0x9a) || (code == 0x9b) || ((bytes_per_line & 0x8000) != 0)) { if (ReadPixmap(image,&pixmap) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); tile_image->depth=(size_t) pixmap.component_size; tile_image->alpha_trait=pixmap.component_count == 4 ? BlendPixelTrait : UndefinedPixelTrait; tile_image->resolution.x=(double) pixmap.horizontal_resolution; tile_image->resolution.y=(double) pixmap.vertical_resolution; tile_image->units=PixelsPerInchResolution; if (tile_image->alpha_trait != UndefinedPixelTrait) (void) SetImageAlpha(tile_image,OpaqueAlpha,exception); } if ((code != 0x9a) && (code != 0x9b)) { /* Initialize colormap. */ tile_image->colors=2; if ((bytes_per_line & 0x8000) != 0) { (void) ReadBlobMSBLong(image); flags=(ssize_t) ReadBlobMSBShort(image); tile_image->colors=1UL*ReadBlobMSBShort(image)+1; } status=AcquireImageColormap(tile_image,tile_image->colors, exception); if (status == MagickFalse) ThrowPICTException(ResourceLimitError, "MemoryAllocationFailed"); if ((bytes_per_line & 0x8000) != 0) { for (i=0; i < (ssize_t) tile_image->colors; i++) { j=ReadBlobMSBShort(image) % tile_image->colors; if ((flags & 0x8000) != 0) j=(size_t) i; tile_image->colormap[j].red=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); tile_image->colormap[j].green=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); tile_image->colormap[j].blue=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); } } else { for (i=0; i < (ssize_t) tile_image->colors; i++) { tile_image->colormap[i].red=(Quantum) (QuantumRange- tile_image->colormap[i].red); tile_image->colormap[i].green=(Quantum) (QuantumRange- tile_image->colormap[i].green); tile_image->colormap[i].blue=(Quantum) (QuantumRange- tile_image->colormap[i].blue); } } } if (EOFBlob(image) != MagickFalse) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); if (ReadRectangle(image,&source) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if (ReadRectangle(image,&destination) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlobMSBShort(image); if ((code == 0x91) || (code == 0x99) || (code == 0x9b)) { /* Skip region. 
*/ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) (length-2); i++) if (ReadBlobByte(image) == EOF) break; } if ((code != 0x9a) && (code != 0x9b) && (bytes_per_line & 0x8000) == 0) pixels=DecodeImage(image,tile_image,(size_t) bytes_per_line,1, &extent); else pixels=DecodeImage(image,tile_image,(size_t) bytes_per_line, (unsigned int) pixmap.bits_per_pixel,&extent); if (pixels == (unsigned char *) NULL) ThrowPICTException(CorruptImageError,"UnableToUncompressImage"); /* Convert PICT tile image to pixel packets. */ p=pixels; for (y=0; y < (ssize_t) tile_image->rows; y++) { if (p > (pixels+extent+image->columns)) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowPICTException(CorruptImageError,"NotEnoughPixelData"); } q=QueueAuthenticPixels(tile_image,0,y,tile_image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) tile_image->columns; x++) { if (tile_image->storage_class == PseudoClass) { index=(Quantum) ConstrainColormapIndex(tile_image,(ssize_t) *p,exception); SetPixelIndex(tile_image,index,q); SetPixelRed(tile_image, tile_image->colormap[(ssize_t) index].red,q); SetPixelGreen(tile_image, tile_image->colormap[(ssize_t) index].green,q); SetPixelBlue(tile_image, tile_image->colormap[(ssize_t) index].blue,q); } else { if (pixmap.bits_per_pixel == 16) { i=(ssize_t) (*p++); j=(size_t) (*p); SetPixelRed(tile_image,ScaleCharToQuantum( (unsigned char) ((i & 0x7c) << 1)),q); SetPixelGreen(tile_image,ScaleCharToQuantum( (unsigned char) (((i & 0x03) << 6) | ((j & 0xe0) >> 2))),q); SetPixelBlue(tile_image,ScaleCharToQuantum( (unsigned char) ((j & 0x1f) << 3)),q); } else if (tile_image->alpha_trait == UndefinedPixelTrait) { if (p > (pixels+extent+2*image->columns)) ThrowPICTException(CorruptImageError, "NotEnoughPixelData"); SetPixelRed(tile_image,ScaleCharToQuantum(*p),q); SetPixelGreen(tile_image,ScaleCharToQuantum( *(p+tile_image->columns)),q); SetPixelBlue(tile_image,ScaleCharToQuantum( *(p+2*tile_image->columns)),q); } else { if (p > (pixels+extent+3*image->columns)) ThrowPICTException(CorruptImageError, "NotEnoughPixelData"); SetPixelAlpha(tile_image,ScaleCharToQuantum(*p),q); SetPixelRed(tile_image,ScaleCharToQuantum( *(p+tile_image->columns)),q); SetPixelGreen(tile_image,ScaleCharToQuantum( *(p+2*tile_image->columns)),q); SetPixelBlue(tile_image,ScaleCharToQuantum( *(p+3*tile_image->columns)),q); } } p++; q+=GetPixelChannels(tile_image); } if (SyncAuthenticPixels(tile_image,exception) == MagickFalse) break; if ((tile_image->storage_class == DirectClass) && (pixmap.bits_per_pixel != 16)) { p+=(pixmap.component_count-1)*tile_image->columns; if (p < pixels) break; } status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, tile_image->rows); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); if ((jpeg == MagickFalse) && (EOFBlob(image) == MagickFalse)) if ((code == 0x9a) || (code == 0x9b) || ((bytes_per_line & 0x8000) != 0)) (void) CompositeImage(image,tile_image,CopyCompositeOp, MagickTrue,(ssize_t) destination.left,(ssize_t) destination.top,exception); tile_image=DestroyImage(tile_image); break; } case 0xa1: { unsigned char *info; size_t type; /* Comment. 
*/ type=ReadBlobMSBShort(image); length=ReadBlobMSBShort(image); if (length == 0) break; (void) ReadBlobMSBLong(image); length-=MagickMin(length,4); if (length == 0) break; info=(unsigned char *) AcquireQuantumMemory(length,sizeof(*info)); if (info == (unsigned char *) NULL) break; count=ReadBlob(image,length,info); if (count != (ssize_t) length) { info=(unsigned char *) RelinquishMagickMemory(info); ThrowPICTException(ResourceLimitError,"UnableToReadImageData"); } switch (type) { case 0xe0: { profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,info); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); if (status == MagickFalse) { info=(unsigned char *) RelinquishMagickMemory(info); ThrowPICTException(ResourceLimitError, "MemoryAllocationFailed"); } break; } case 0x1f2: { profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,info); status=SetImageProfile(image,"iptc",profile,exception); if (status == MagickFalse) { info=(unsigned char *) RelinquishMagickMemory(info); ThrowPICTException(ResourceLimitError, "MemoryAllocationFailed"); } profile=DestroyStringInfo(profile); break; } default: break; } info=(unsigned char *) RelinquishMagickMemory(info); break; } default: { /* Skip to next op code. */ if (codes[code].length == -1) (void) ReadBlobMSBShort(image); else for (i=0; i < (ssize_t) codes[code].length; i++) if (ReadBlobByte(image) == EOF) break; } } } if (code == 0xc00) { /* Skip header. */ for (i=0; i < 24; i++) if (ReadBlobByte(image) == EOF) break; continue; } if (((code >= 0xb0) && (code <= 0xcf)) || ((code >= 0x8000) && (code <= 0x80ff))) continue; if (code == 0x8200) { char filename[MaxTextExtent]; FILE *file; int unique_file; /* Embedded JPEG. */ jpeg=MagickTrue; read_info=CloneImageInfo(image_info); SetImageInfoBlob(read_info,(void *) NULL,0); file=(FILE *) NULL; unique_file=AcquireUniqueFileResource(filename); (void) FormatLocaleString(read_info->filename,MaxTextExtent,"jpeg:%s", filename); if (unique_file != -1) file=fdopen(unique_file,"wb"); if ((unique_file == -1) || (file == (FILE *) NULL)) { (void) RelinquishUniqueFileResource(read_info->filename); (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); ThrowPICTException(FileOpenError,"UnableToCreateTemporaryFile"); } length=ReadBlobMSBLong(image); if (length > 154) { for (i=0; i < 6; i++) (void) ReadBlobMSBLong(image); if (ReadRectangle(image,&frame) == MagickFalse) { (void) fclose(file); (void) RelinquishUniqueFileResource(read_info->filename); ThrowPICTException(CorruptImageError,"ImproperImageHeader"); } for (i=0; i < 122; i++) if (ReadBlobByte(image) == EOF) break; for (i=0; i < (ssize_t) (length-154); i++) { c=ReadBlobByte(image); if (c == EOF) break; if (fputc(c,file) != c) break; } } (void) fclose(file); (void) close(unique_file); tile_image=ReadImage(read_info,exception); (void) RelinquishUniqueFileResource(filename); read_info=DestroyImageInfo(read_info); if (tile_image == (Image *) NULL) continue; (void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g", (double) MagickMax(image->columns,tile_image->columns), (double) MagickMax(image->rows,tile_image->rows)); (void) SetImageExtent(image, MagickMax(image->columns,tile_image->columns), MagickMax(image->rows,tile_image->rows),exception); (void) TransformImageColorspace(image,tile_image->colorspace,exception); (void) CompositeImage(image,tile_image,CopyCompositeOp,MagickTrue, (ssize_t) frame.left,(ssize_t) frame.right,exception); 
image->compression=tile_image->compression; tile_image=DestroyImage(tile_image); continue; } if ((code == 0xff) || (code == 0xffff)) break; if (((code >= 0xd0) && (code <= 0xfe)) || ((code >= 0x8100) && (code <= 0xffff))) { /* Skip reserved. */ length=ReadBlobMSBShort(image); for (i=0; i < (ssize_t) length; i++) if (ReadBlobByte(image) == EOF) break; continue; } if ((code >= 0x100) && (code <= 0x7fff)) { /* Skip reserved. */ length=(size_t) ((code >> 7) & 0xff); for (i=0; i < (ssize_t) length; i++) if (ReadBlobByte(image) == EOF) break; continue; } } (void) CloseBlob(image); return(GetFirstImageInList(image)); } Commit Message: https://github.com/ImageMagick/ImageMagick/issues/1269 CWE ID: CWE-20
static Image *ReadPICTImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define ThrowPICTException(exception,message) \ { \ if (tile_image != (Image *) NULL) \ tile_image=DestroyImage(tile_image); \ if (read_info != (ImageInfo *) NULL) \ read_info=DestroyImageInfo(read_info); \ ThrowReaderException((exception),(message)); \ } char geometry[MagickPathExtent], header_ole[4]; Image *image, *tile_image; ImageInfo *read_info; int c, code; MagickBooleanType jpeg, status; PICTRectangle frame; PICTPixmap pixmap; Quantum index; register Quantum *q; register ssize_t i, x; size_t extent, length; ssize_t count, flags, j, version, y; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read PICT header. */ read_info=(ImageInfo *) NULL; tile_image=(Image *) NULL; pixmap.bits_per_pixel=0; pixmap.component_count=0; /* Skip header : 512 for standard PICT and 4, ie "PICT" for OLE2. */ header_ole[0]=ReadBlobByte(image); header_ole[1]=ReadBlobByte(image); header_ole[2]=ReadBlobByte(image); header_ole[3]=ReadBlobByte(image); if (!((header_ole[0] == 0x50) && (header_ole[1] == 0x49) && (header_ole[2] == 0x43) && (header_ole[3] == 0x54 ))) for (i=0; i < 508; i++) if (ReadBlobByte(image) == EOF) break; (void) ReadBlobMSBShort(image); /* skip picture size */ if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); while ((c=ReadBlobByte(image)) == 0) ; if (c != 0x11) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); version=(ssize_t) ReadBlobByte(image); if (version == 2) { c=ReadBlobByte(image); if (c != 0xff) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); } else if (version != 1) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if ((frame.left < 0) || (frame.right < 0) || (frame.top < 0) || (frame.bottom < 0) || (frame.left >= frame.right) || (frame.top >= frame.bottom)) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); /* Create black canvas. */ flags=0; image->depth=8; image->columns=(size_t) (frame.right-frame.left); image->rows=(size_t) (frame.bottom-frame.top); image->resolution.x=DefaultResolution; image->resolution.y=DefaultResolution; image->units=UndefinedResolution; if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows,exception); if (status != MagickFalse) status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Interpret PICT opcodes. 
*/ jpeg=MagickFalse; for (code=0; EOFBlob(image) == MagickFalse; ) { if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; if ((version == 1) || ((TellBlob(image) % 2) != 0)) code=ReadBlobByte(image); if (version == 2) code=ReadBlobMSBSignedShort(image); if (code < 0) break; if (code == 0) continue; if (code > 0xa1) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"%04X:",code); } else { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %04X %s: %s",code,codes[code].name,codes[code].description); switch (code) { case 0x01: { /* Clipping rectangle. */ length=ReadBlobMSBShort(image); if (length > GetBlobSize(image)) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); if (length != 0x000a) { for (i=0; i < (ssize_t) (length-2); i++) if (ReadBlobByte(image) == EOF) break; break; } if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if (((frame.left & 0x8000) != 0) || ((frame.top & 0x8000) != 0)) break; image->columns=(size_t) (frame.right-frame.left); image->rows=(size_t) (frame.bottom-frame.top); status=SetImageExtent(image,image->columns,image->rows,exception); if (status != MagickFalse) status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); break; } case 0x12: case 0x13: case 0x14: { ssize_t pattern; size_t height, width; /* Skip pattern definition. */ pattern=(ssize_t) ReadBlobMSBShort(image); for (i=0; i < 8; i++) if (ReadBlobByte(image) == EOF) break; if (pattern == 2) { for (i=0; i < 5; i++) if (ReadBlobByte(image) == EOF) break; break; } if (pattern != 1) ThrowPICTException(CorruptImageError,"UnknownPatternType"); length=ReadBlobMSBShort(image); if (length > GetBlobSize(image)) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if (ReadPixmap(image,&pixmap) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); image->depth=(size_t) pixmap.component_size; image->resolution.x=1.0*pixmap.horizontal_resolution; image->resolution.y=1.0*pixmap.vertical_resolution; image->units=PixelsPerInchResolution; (void) ReadBlobMSBLong(image); flags=(ssize_t) ReadBlobMSBShort(image); length=ReadBlobMSBShort(image); if (length > GetBlobSize(image)) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); for (i=0; i <= (ssize_t) length; i++) (void) ReadBlobMSBLong(image); width=(size_t) (frame.bottom-frame.top); height=(size_t) (frame.right-frame.left); if (pixmap.bits_per_pixel <= 8) length&=0x7fff; if (pixmap.bits_per_pixel == 16) width<<=1; if (length == 0) length=width; if (length < 8) { for (i=0; i < (ssize_t) (length*height); i++) if (ReadBlobByte(image) == EOF) break; } else for (i=0; i < (ssize_t) height; i++) { if (EOFBlob(image) != MagickFalse) break; if (length > 200) { for (j=0; j < (ssize_t) ReadBlobMSBShort(image); j++) if (ReadBlobByte(image) == EOF) break; } else for (j=0; j < (ssize_t) ReadBlobByte(image); j++) if (ReadBlobByte(image) == EOF) break; } break; } case 0x1b: { /* Initialize image background color. 
*/ image->background_color.red=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); image->background_color.green=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); image->background_color.blue=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); break; } case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: { /* Skip polygon or region. */ length=ReadBlobMSBShort(image); if (length > GetBlobSize(image)) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); for (i=0; i < (ssize_t) (length-2); i++) if (ReadBlobByte(image) == EOF) break; break; } case 0x90: case 0x91: case 0x98: case 0x99: case 0x9a: case 0x9b: { PICTRectangle source, destination; register unsigned char *p; size_t j; ssize_t bytes_per_line; unsigned char *pixels; /* Pixmap clipped by a rectangle. */ bytes_per_line=0; if ((code != 0x9a) && (code != 0x9b)) bytes_per_line=(ssize_t) ReadBlobMSBShort(image); else { (void) ReadBlobMSBShort(image); (void) ReadBlobMSBShort(image); (void) ReadBlobMSBShort(image); } if (ReadRectangle(image,&frame) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); /* Initialize tile image. */ tile_image=CloneImage(image,(size_t) (frame.right-frame.left), (size_t) (frame.bottom-frame.top),MagickTrue,exception); if (tile_image == (Image *) NULL) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if ((code == 0x9a) || (code == 0x9b) || ((bytes_per_line & 0x8000) != 0)) { if (ReadPixmap(image,&pixmap) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); tile_image->depth=(size_t) pixmap.component_size; tile_image->alpha_trait=pixmap.component_count == 4 ? BlendPixelTrait : UndefinedPixelTrait; tile_image->resolution.x=(double) pixmap.horizontal_resolution; tile_image->resolution.y=(double) pixmap.vertical_resolution; tile_image->units=PixelsPerInchResolution; if (tile_image->alpha_trait != UndefinedPixelTrait) (void) SetImageAlpha(tile_image,OpaqueAlpha,exception); } if ((code != 0x9a) && (code != 0x9b)) { /* Initialize colormap. 
*/ tile_image->colors=2; if ((bytes_per_line & 0x8000) != 0) { (void) ReadBlobMSBLong(image); flags=(ssize_t) ReadBlobMSBShort(image); tile_image->colors=1UL*ReadBlobMSBShort(image)+1; } status=AcquireImageColormap(tile_image,tile_image->colors, exception); if (status == MagickFalse) ThrowPICTException(ResourceLimitError, "MemoryAllocationFailed"); if ((bytes_per_line & 0x8000) != 0) { for (i=0; i < (ssize_t) tile_image->colors; i++) { j=ReadBlobMSBShort(image) % tile_image->colors; if ((flags & 0x8000) != 0) j=(size_t) i; tile_image->colormap[j].red=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); tile_image->colormap[j].green=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); tile_image->colormap[j].blue=(Quantum) ScaleShortToQuantum(ReadBlobMSBShort(image)); } } else { for (i=0; i < (ssize_t) tile_image->colors; i++) { tile_image->colormap[i].red=(Quantum) (QuantumRange- tile_image->colormap[i].red); tile_image->colormap[i].green=(Quantum) (QuantumRange- tile_image->colormap[i].green); tile_image->colormap[i].blue=(Quantum) (QuantumRange- tile_image->colormap[i].blue); } } } if (EOFBlob(image) != MagickFalse) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); if (ReadRectangle(image,&source) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); if (ReadRectangle(image,&destination) == MagickFalse) ThrowPICTException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlobMSBShort(image); if ((code == 0x91) || (code == 0x99) || (code == 0x9b)) { /* Skip region. */ length=ReadBlobMSBShort(image); if (length > GetBlobSize(image)) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); for (i=0; i < (ssize_t) (length-2); i++) if (ReadBlobByte(image) == EOF) break; } if ((code != 0x9a) && (code != 0x9b) && (bytes_per_line & 0x8000) == 0) pixels=DecodeImage(image,tile_image,(size_t) bytes_per_line,1, &extent); else pixels=DecodeImage(image,tile_image,(size_t) bytes_per_line, (unsigned int) pixmap.bits_per_pixel,&extent); if (pixels == (unsigned char *) NULL) ThrowPICTException(CorruptImageError,"UnableToUncompressImage"); /* Convert PICT tile image to pixel packets. 
*/ p=pixels; for (y=0; y < (ssize_t) tile_image->rows; y++) { if (p > (pixels+extent+image->columns)) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowPICTException(CorruptImageError,"NotEnoughPixelData"); } q=QueueAuthenticPixels(tile_image,0,y,tile_image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) tile_image->columns; x++) { if (tile_image->storage_class == PseudoClass) { index=(Quantum) ConstrainColormapIndex(tile_image,(ssize_t) *p,exception); SetPixelIndex(tile_image,index,q); SetPixelRed(tile_image, tile_image->colormap[(ssize_t) index].red,q); SetPixelGreen(tile_image, tile_image->colormap[(ssize_t) index].green,q); SetPixelBlue(tile_image, tile_image->colormap[(ssize_t) index].blue,q); } else { if (pixmap.bits_per_pixel == 16) { i=(ssize_t) (*p++); j=(size_t) (*p); SetPixelRed(tile_image,ScaleCharToQuantum( (unsigned char) ((i & 0x7c) << 1)),q); SetPixelGreen(tile_image,ScaleCharToQuantum( (unsigned char) (((i & 0x03) << 6) | ((j & 0xe0) >> 2))),q); SetPixelBlue(tile_image,ScaleCharToQuantum( (unsigned char) ((j & 0x1f) << 3)),q); } else if (tile_image->alpha_trait == UndefinedPixelTrait) { if (p > (pixels+extent+2*image->columns)) ThrowPICTException(CorruptImageError, "NotEnoughPixelData"); SetPixelRed(tile_image,ScaleCharToQuantum(*p),q); SetPixelGreen(tile_image,ScaleCharToQuantum( *(p+tile_image->columns)),q); SetPixelBlue(tile_image,ScaleCharToQuantum( *(p+2*tile_image->columns)),q); } else { if (p > (pixels+extent+3*image->columns)) ThrowPICTException(CorruptImageError, "NotEnoughPixelData"); SetPixelAlpha(tile_image,ScaleCharToQuantum(*p),q); SetPixelRed(tile_image,ScaleCharToQuantum( *(p+tile_image->columns)),q); SetPixelGreen(tile_image,ScaleCharToQuantum( *(p+2*tile_image->columns)),q); SetPixelBlue(tile_image,ScaleCharToQuantum( *(p+3*tile_image->columns)),q); } } p++; q+=GetPixelChannels(tile_image); } if (SyncAuthenticPixels(tile_image,exception) == MagickFalse) break; if ((tile_image->storage_class == DirectClass) && (pixmap.bits_per_pixel != 16)) { p+=(pixmap.component_count-1)*tile_image->columns; if (p < pixels) break; } status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, tile_image->rows); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); if ((jpeg == MagickFalse) && (EOFBlob(image) == MagickFalse)) if ((code == 0x9a) || (code == 0x9b) || ((bytes_per_line & 0x8000) != 0)) (void) CompositeImage(image,tile_image,CopyCompositeOp, MagickTrue,(ssize_t) destination.left,(ssize_t) destination.top,exception); tile_image=DestroyImage(tile_image); break; } case 0xa1: { unsigned char *info; size_t type; /* Comment. 
*/ type=ReadBlobMSBShort(image); length=ReadBlobMSBShort(image); if (length > GetBlobSize(image)) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); if (length == 0) break; (void) ReadBlobMSBLong(image); length-=MagickMin(length,4); if (length == 0) break; info=(unsigned char *) AcquireQuantumMemory(length,sizeof(*info)); if (info == (unsigned char *) NULL) break; count=ReadBlob(image,length,info); if (count != (ssize_t) length) { info=(unsigned char *) RelinquishMagickMemory(info); ThrowPICTException(ResourceLimitError,"UnableToReadImageData"); } switch (type) { case 0xe0: { profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,info); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); if (status == MagickFalse) { info=(unsigned char *) RelinquishMagickMemory(info); ThrowPICTException(ResourceLimitError, "MemoryAllocationFailed"); } break; } case 0x1f2: { profile=BlobToStringInfo((const void *) NULL,length); SetStringInfoDatum(profile,info); status=SetImageProfile(image,"iptc",profile,exception); if (status == MagickFalse) { info=(unsigned char *) RelinquishMagickMemory(info); ThrowPICTException(ResourceLimitError, "MemoryAllocationFailed"); } profile=DestroyStringInfo(profile); break; } default: break; } info=(unsigned char *) RelinquishMagickMemory(info); break; } default: { /* Skip to next op code. */ if (codes[code].length == -1) (void) ReadBlobMSBShort(image); else for (i=0; i < (ssize_t) codes[code].length; i++) if (ReadBlobByte(image) == EOF) break; } } } if (code == 0xc00) { /* Skip header. */ for (i=0; i < 24; i++) if (ReadBlobByte(image) == EOF) break; continue; } if (((code >= 0xb0) && (code <= 0xcf)) || ((code >= 0x8000) && (code <= 0x80ff))) continue; if (code == 0x8200) { char filename[MaxTextExtent]; FILE *file; int unique_file; /* Embedded JPEG. 
*/ jpeg=MagickTrue; read_info=CloneImageInfo(image_info); SetImageInfoBlob(read_info,(void *) NULL,0); file=(FILE *) NULL; unique_file=AcquireUniqueFileResource(filename); (void) FormatLocaleString(read_info->filename,MaxTextExtent,"jpeg:%s", filename); if (unique_file != -1) file=fdopen(unique_file,"wb"); if ((unique_file == -1) || (file == (FILE *) NULL)) { (void) RelinquishUniqueFileResource(read_info->filename); (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); ThrowPICTException(FileOpenError,"UnableToCreateTemporaryFile"); } length=ReadBlobMSBLong(image); if (length > GetBlobSize(image)) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); if (length > 154) { for (i=0; i < 6; i++) (void) ReadBlobMSBLong(image); if (ReadRectangle(image,&frame) == MagickFalse) { (void) fclose(file); (void) RelinquishUniqueFileResource(read_info->filename); ThrowPICTException(CorruptImageError,"ImproperImageHeader"); } for (i=0; i < 122; i++) if (ReadBlobByte(image) == EOF) break; for (i=0; i < (ssize_t) (length-154); i++) { c=ReadBlobByte(image); if (c == EOF) break; if (fputc(c,file) != c) break; } } (void) fclose(file); (void) close(unique_file); tile_image=ReadImage(read_info,exception); (void) RelinquishUniqueFileResource(filename); read_info=DestroyImageInfo(read_info); if (tile_image == (Image *) NULL) continue; (void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g", (double) MagickMax(image->columns,tile_image->columns), (double) MagickMax(image->rows,tile_image->rows)); (void) SetImageExtent(image, MagickMax(image->columns,tile_image->columns), MagickMax(image->rows,tile_image->rows),exception); (void) TransformImageColorspace(image,tile_image->colorspace,exception); (void) CompositeImage(image,tile_image,CopyCompositeOp,MagickTrue, (ssize_t) frame.left,(ssize_t) frame.right,exception); image->compression=tile_image->compression; tile_image=DestroyImage(tile_image); continue; } if ((code == 0xff) || (code == 0xffff)) break; if (((code >= 0xd0) && (code <= 0xfe)) || ((code >= 0x8100) && (code <= 0xffff))) { /* Skip reserved. */ length=ReadBlobMSBShort(image); if (length > GetBlobSize(image)) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); for (i=0; i < (ssize_t) length; i++) if (ReadBlobByte(image) == EOF) break; continue; } if ((code >= 0x100) && (code <= 0x7fff)) { /* Skip reserved. */ length=(size_t) ((code >> 7) & 0xff); if (length > GetBlobSize(image)) ThrowPICTException(CorruptImageError, "InsufficientImageDataInFile"); for (i=0; i < (ssize_t) length; i++) if (ReadBlobByte(image) == EOF) break; continue; } } (void) CloseBlob(image); return(GetFirstImageInList(image)); }
169,037
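The fix above guards each opcode length with a check against GetBlobSize() before skipping or reading data. A minimal stand-alone sketch of that pattern in plain C, using stdio in place of ImageMagick's blob API (all names here are hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Skip a length-prefixed chunk only if the declared length actually fits in
   the bytes remaining in the file; treat anything larger as corrupt. */
static int skip_chunk(FILE *f, uint32_t declared_len, long file_size)
{
    long pos = ftell(f);

    if (pos < 0 || pos > file_size)
        return -1;
    if ((uint64_t) declared_len > (uint64_t) (file_size - pos))
        return -1;                      /* claims more data than the file holds */
    return fseek(f, (long) declared_len, SEEK_CUR);
}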
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void LocalFileSystem::requestFileSystem(ExecutionContext* context, FileSystemType type, long long size, PassOwnPtr<AsyncFileSystemCallbacks> callbacks) { RefPtrWillBeRawPtr<ExecutionContext> contextPtr(context); RefPtr<CallbackWrapper> wrapper = adoptRef(new CallbackWrapper(callbacks)); requestFileSystemAccessInternal(context, bind(&LocalFileSystem::fileSystemAllowedInternal, this, contextPtr, type, wrapper), bind(&LocalFileSystem::fileSystemNotAllowedInternal, this, contextPtr, wrapper)); } Commit Message: Oilpan: Ship Oilpan for SyncCallbackHelper, CreateFileResult and CallbackWrapper in filesystem/ These are leftovers when we shipped Oilpan for filesystem/ once. BUG=340522 Review URL: https://codereview.chromium.org/501263003 git-svn-id: svn://svn.chromium.org/blink/trunk@180909 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
void LocalFileSystem::requestFileSystem(ExecutionContext* context, FileSystemType type, long long size, PassOwnPtr<AsyncFileSystemCallbacks> callbacks) { RefPtrWillBeRawPtr<ExecutionContext> contextPtr(context); CallbackWrapper* wrapper = new CallbackWrapper(callbacks); requestFileSystemAccessInternal(context, bind(&LocalFileSystem::fileSystemAllowedInternal, this, contextPtr, type, wrapper), bind(&LocalFileSystem::fileSystemNotAllowedInternal, this, contextPtr, wrapper)); }
171,429
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void AddInputMethodNames(const GList* engines, InputMethodDescriptors* out) { DCHECK(out); for (; engines; engines = g_list_next(engines)) { IBusEngineDesc* engine_desc = IBUS_ENGINE_DESC(engines->data); const gchar* name = ibus_engine_desc_get_name(engine_desc); const gchar* longname = ibus_engine_desc_get_longname(engine_desc); const gchar* layout = ibus_engine_desc_get_layout(engine_desc); const gchar* language = ibus_engine_desc_get_language(engine_desc); if (InputMethodIdIsWhitelisted(name)) { out->push_back(CreateInputMethodDescriptor(name, longname, layout, language)); DLOG(INFO) << name << " (preloaded)"; } } } Commit Message: Remove use of libcros from InputMethodLibrary. BUG=chromium-os:16238 TEST==confirm that input methods work as before on the netbook. Also confirm that the chrome builds and works on the desktop as before. Review URL: http://codereview.chromium.org/7003086 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
void AddInputMethodNames(const GList* engines, InputMethodDescriptors* out) { DCHECK(out); for (; engines; engines = g_list_next(engines)) { IBusEngineDesc* engine_desc = IBUS_ENGINE_DESC(engines->data); const gchar* name = ibus_engine_desc_get_name(engine_desc); const gchar* longname = ibus_engine_desc_get_longname(engine_desc); const gchar* layout = ibus_engine_desc_get_layout(engine_desc); const gchar* language = ibus_engine_desc_get_language(engine_desc); if (InputMethodIdIsWhitelisted(name)) { out->push_back(CreateInputMethodDescriptor(name, longname, layout, language)); VLOG(1) << name << " (preloaded)"; } } }
170,517
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int ext4_split_unwritten_extents(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path *path, int flags) { struct ext4_extent *ex, newex, orig_ex; struct ext4_extent *ex1 = NULL; struct ext4_extent *ex2 = NULL; struct ext4_extent *ex3 = NULL; ext4_lblk_t ee_block, eof_block; unsigned int allocated, ee_len, depth; ext4_fsblk_t newblock; int err = 0; int may_zeroout; ext_debug("ext4_split_unwritten_extents: inode %lu, logical" "block %llu, max_blocks %u\n", inode->i_ino, (unsigned long long)map->m_lblk, map->m_len); eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; if (eof_block < map->m_lblk + map->m_len) eof_block = map->m_lblk + map->m_len; depth = ext_depth(inode); ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); allocated = ee_len - (map->m_lblk - ee_block); newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex); ex2 = ex; orig_ex.ee_block = ex->ee_block; orig_ex.ee_len = cpu_to_le16(ee_len); ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex)); /* * It is safe to convert extent to initialized via explicit * zeroout only if extent is fully insde i_size or new_size. */ may_zeroout = ee_block + ee_len <= eof_block; /* * If the uninitialized extent begins at the same logical * block where the write begins, and the write completely * covers the extent, then we don't need to split it. */ if ((map->m_lblk == ee_block) && (allocated <= map->m_len)) return allocated; err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; /* ex1: ee_block to map->m_lblk - 1 : uninitialized */ if (map->m_lblk > ee_block) { ex1 = ex; ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); ext4_ext_mark_uninitialized(ex1); ex2 = &newex; } /* * for sanity, update the length of the ex2 extent before * we insert ex3, if ex1 is NULL. This is to avoid temporary * overlap of blocks. */ if (!ex1 && allocated > map->m_len) ex2->ee_len = cpu_to_le16(map->m_len); /* ex3: to ee_block + ee_len : uninitialised */ if (allocated > map->m_len) { unsigned int newdepth; ex3 = &newex; ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len); ext4_ext_store_pblock(ex3, newblock + map->m_len); ex3->ee_len = cpu_to_le16(allocated - map->m_len); ext4_ext_mark_uninitialized(ex3); err = ext4_ext_insert_extent(handle, inode, path, ex3, flags); if (err == -ENOSPC && may_zeroout) { err = ext4_ext_zeroout(inode, &orig_ex); if (err) goto fix_extent_len; /* update the extent length and mark as initialized */ ex->ee_block = orig_ex.ee_block; ex->ee_len = orig_ex.ee_len; ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); ext4_ext_dirty(handle, inode, path + depth); /* zeroed the full extent */ /* blocks available from map->m_lblk */ return allocated; } else if (err) goto fix_extent_len; /* * The depth, and hence eh & ex might change * as part of the insert above. 
*/ newdepth = ext_depth(inode); /* * update the extent length after successful insert of the * split extent */ ee_len -= ext4_ext_get_actual_len(ex3); orig_ex.ee_len = cpu_to_le16(ee_len); may_zeroout = ee_block + ee_len <= eof_block; depth = newdepth; ext4_ext_drop_refs(path); path = ext4_ext_find_extent(inode, map->m_lblk, path); if (IS_ERR(path)) { err = PTR_ERR(path); goto out; } ex = path[depth].p_ext; if (ex2 != &newex) ex2 = ex; err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; allocated = map->m_len; } /* * If there was a change of depth as part of the * insertion of ex3 above, we need to update the length * of the ex1 extent again here */ if (ex1 && ex1 != ex) { ex1 = ex; ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); ext4_ext_mark_uninitialized(ex1); ex2 = &newex; } /* * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written * using direct I/O, uninitialised still. */ ex2->ee_block = cpu_to_le32(map->m_lblk); ext4_ext_store_pblock(ex2, newblock); ex2->ee_len = cpu_to_le16(allocated); ext4_ext_mark_uninitialized(ex2); if (ex2 != ex) goto insert; /* Mark modified extent as dirty */ err = ext4_ext_dirty(handle, inode, path + depth); ext_debug("out here\n"); goto out; insert: err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); if (err == -ENOSPC && may_zeroout) { err = ext4_ext_zeroout(inode, &orig_ex); if (err) goto fix_extent_len; /* update the extent length and mark as initialized */ ex->ee_block = orig_ex.ee_block; ex->ee_len = orig_ex.ee_len; ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); ext4_ext_dirty(handle, inode, path + depth); /* zero out the first half */ return allocated; } else if (err) goto fix_extent_len; out: ext4_ext_show_leaf(inode, path); return err ? err : allocated; fix_extent_len: ex->ee_block = orig_ex.ee_block; ex->ee_len = orig_ex.ee_len; ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); ext4_ext_mark_uninitialized(ex); ext4_ext_dirty(handle, inode, path + depth); return err; } Commit Message: ext4: reimplement convert and split_unwritten Reimplement ext4_ext_convert_to_initialized() and ext4_split_unwritten_extents() using ext4_split_extent() Signed-off-by: Yongqiang Yang <[email protected]> Signed-off-by: "Theodore Ts'o" <[email protected]> Tested-by: Allison Henderson <[email protected]> CWE ID:
static int ext4_split_unwritten_extents(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path *path, int flags) { ext4_lblk_t eof_block; ext4_lblk_t ee_block; struct ext4_extent *ex; unsigned int ee_len; int split_flag = 0, depth; ext_debug("ext4_split_unwritten_extents: inode %lu, logical" "block %llu, max_blocks %u\n", inode->i_ino, (unsigned long long)map->m_lblk, map->m_len); eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; if (eof_block < map->m_lblk + map->m_len) eof_block = map->m_lblk + map->m_len; /* * It is safe to convert extent to initialized via explicit * zeroout only if extent is fully insde i_size or new_size. */ depth = ext_depth(inode); ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; split_flag |= EXT4_EXT_MARK_UNINIT2; flags |= EXT4_GET_BLOCKS_PRE_IO; return ext4_split_extent(handle, inode, path, map, split_flag, flags); }
166,218
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int g2m_init_buffers(G2MContext *c) { int aligned_height; if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) { c->framebuf_stride = FFALIGN(c->width * 3, 16); aligned_height = FFALIGN(c->height, 16); av_free(c->framebuf); c->framebuf = av_mallocz(c->framebuf_stride * aligned_height); if (!c->framebuf) return AVERROR(ENOMEM); } if (!c->synth_tile || !c->jpeg_tile || c->old_tile_w < c->tile_width || c->old_tile_h < c->tile_height) { c->tile_stride = FFALIGN(c->tile_width, 16) * 3; aligned_height = FFALIGN(c->tile_height, 16); av_free(c->synth_tile); av_free(c->jpeg_tile); av_free(c->kempf_buf); av_free(c->kempf_flags); c->synth_tile = av_mallocz(c->tile_stride * aligned_height); c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height); c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height + FF_INPUT_BUFFER_PADDING_SIZE); c->kempf_flags = av_mallocz( c->tile_width * aligned_height); if (!c->synth_tile || !c->jpeg_tile || !c->kempf_buf || !c->kempf_flags) return AVERROR(ENOMEM); } return 0; } Commit Message: avcodec/g2meet: Fix framebuf size Currently the code can in some cases draw tiles that hang outside the allocated buffer. This patch increases the buffer size to avoid out of array accesses. An alternative would be to fail if such tiles are encountered. I do not know if any valid files use such hanging tiles. Fixes Ticket2971 Found-by: ami_stuff Signed-off-by: Michael Niedermayer <[email protected]> CWE ID: CWE-119
static int g2m_init_buffers(G2MContext *c) { int aligned_height; if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) { c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3; aligned_height = c->height + 15; av_free(c->framebuf); c->framebuf = av_mallocz(c->framebuf_stride * aligned_height); if (!c->framebuf) return AVERROR(ENOMEM); } if (!c->synth_tile || !c->jpeg_tile || c->old_tile_w < c->tile_width || c->old_tile_h < c->tile_height) { c->tile_stride = FFALIGN(c->tile_width, 16) * 3; aligned_height = FFALIGN(c->tile_height, 16); av_free(c->synth_tile); av_free(c->jpeg_tile); av_free(c->kempf_buf); av_free(c->kempf_flags); c->synth_tile = av_mallocz(c->tile_stride * aligned_height); c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height); c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height + FF_INPUT_BUFFER_PADDING_SIZE); c->kempf_flags = av_mallocz( c->tile_width * aligned_height); if (!c->synth_tile || !c->jpeg_tile || !c->kempf_buf || !c->kempf_flags) return AVERROR(ENOMEM); } return 0; }
165,915
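The fix above sizes the framebuffer for the rounded-up tile grid (FFALIGN(width + 15, 16) and height + 15) so a tile overhanging the right or bottom edge cannot write past the allocation. A small sketch of that over-allocation pattern, with hypothetical names and an assumed tile size:

#include <stdint.h>
#include <stdlib.h>

#define TILE 16                         /* assumed tile size */

/* Allocate a 3-bytes-per-pixel canvas sized for the rounded-up tile grid, so
   a tile that overhangs the nominal right/bottom edge stays inside the buffer. */
static uint8_t *alloc_tiled_canvas(unsigned width, unsigned height,
                                    size_t *stride_out)
{
    size_t w = ((size_t) width  + TILE - 1) / TILE * TILE;
    size_t h = ((size_t) height + TILE - 1) / TILE * TILE;

    *stride_out = w * 3;
    return calloc(h, *stride_out);
}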
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: cdf_file_summary_info(struct magic_set *ms, const cdf_header_t *h, const cdf_stream_t *sst, const uint64_t clsid[2]) { cdf_summary_info_header_t si; cdf_property_info_t *info; size_t count; int m; if (cdf_unpack_summary_info(sst, h, &si, &info, &count) == -1) return -1; if (NOTMIME(ms)) { const char *str; if (file_printf(ms, "Composite Document File V2 Document") == -1) return -1; if (file_printf(ms, ", %s Endian", si.si_byte_order == 0xfffe ? "Little" : "Big") == -1) return -2; switch (si.si_os) { case 2: if (file_printf(ms, ", Os: Windows, Version %d.%d", si.si_os_version & 0xff, (uint32_t)si.si_os_version >> 8) == -1) return -2; break; case 1: if (file_printf(ms, ", Os: MacOS, Version %d.%d", (uint32_t)si.si_os_version >> 8, si.si_os_version & 0xff) == -1) return -2; break; default: if (file_printf(ms, ", Os %d, Version: %d.%d", si.si_os, si.si_os_version & 0xff, (uint32_t)si.si_os_version >> 8) == -1) return -2; break; } str = cdf_clsid_to_mime(clsid, clsid2desc); if (str) if (file_printf(ms, ", %s", str) == -1) return -2; } m = cdf_file_property_info(ms, info, count, clsid); free(info); return m == -1 ? -2 : m; } Commit Message: Apply patches from file-CVE-2012-1571.patch From Francisco Alonso Espejo: file < 5.18/git version can be made to crash when checking some corrupt CDF files (Using an invalid cdf_read_short_sector size) The problem I found here, is that in most situations (if h_short_sec_size_p2 > 8) because the blocksize is 512 and normal values are 06 which means reading 64 bytes.As long as the check for the block size copy is not checked properly (there's an assert that makes wrong/invalid assumptions) CWE ID: CWE-119
cdf_file_summary_info(struct magic_set *ms, const cdf_header_t *h, const cdf_stream_t *sst, const cdf_directory_t *root_storage) { cdf_summary_info_header_t si; cdf_property_info_t *info; size_t count; int m; if (cdf_unpack_summary_info(sst, h, &si, &info, &count) == -1) return -1; if (NOTMIME(ms)) { const char *str; if (file_printf(ms, "Composite Document File V2 Document") == -1) return -1; if (file_printf(ms, ", %s Endian", si.si_byte_order == 0xfffe ? "Little" : "Big") == -1) return -2; switch (si.si_os) { case 2: if (file_printf(ms, ", Os: Windows, Version %d.%d", si.si_os_version & 0xff, (uint32_t)si.si_os_version >> 8) == -1) return -2; break; case 1: if (file_printf(ms, ", Os: MacOS, Version %d.%d", (uint32_t)si.si_os_version >> 8, si.si_os_version & 0xff) == -1) return -2; break; default: if (file_printf(ms, ", Os %d, Version: %d.%d", si.si_os, si.si_os_version & 0xff, (uint32_t)si.si_os_version >> 8) == -1) return -2; break; } if (root_storage) { str = cdf_clsid_to_mime(root_storage->d_storage_uuid, clsid2desc); if (str) if (file_printf(ms, ", %s", str) == -1) return -2; } } m = cdf_file_property_info(ms, info, count, root_storage); free(info); return m == -1 ? -2 : m; }
166,446
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int ras_validate(jas_stream_t *in) { uchar buf[RAS_MAGICLEN]; int i; int n; uint_fast32_t magic; assert(JAS_STREAM_MAXPUTBACK >= RAS_MAGICLEN); /* Read the validation data (i.e., the data used for detecting the format). */ if ((n = jas_stream_read(in, buf, RAS_MAGICLEN)) < 0) { return -1; } /* Put the validation data back onto the stream, so that the stream position will not be changed. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Did we read enough data? */ if (n < RAS_MAGICLEN) { return -1; } magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) | (JAS_CAST(uint_fast32_t, buf[1]) << 16) | (JAS_CAST(uint_fast32_t, buf[2]) << 8) | buf[3]; /* Is the signature correct for the Sun Rasterfile format? */ if (magic != RAS_MAGIC) { return -1; } return 0; } Commit Message: The generation of the configuration file jas_config.h has been completely reworked in order to avoid pollution of the global namespace. Some problematic types like uchar, ulong, and friends have been replaced with names with a jas_ prefix. An option max_samples has been added to the BMP and JPEG decoders to restrict the maximum size of image that they can decode. This change was made as a (possibly temporary) fix to address security concerns. A max_samples command-line option has also been added to imginfo. Whether an image component (for jas_image_t) is stored in memory or on disk is now based on the component size (rather than the image size). Some debug log message were added. Some new integer overflow checks were added. Some new safe integer add/multiply functions were added. More pre-C99 cruft was removed. JasPer has numerous "hacks" to handle pre-C99 compilers. JasPer now assumes C99 support. So, this pre-C99 cruft is unnecessary and can be removed. The regression jasper-doublefree-mem_close.jpg has been re-enabled. Theoretically, it should work more predictably now. CWE ID: CWE-190
int ras_validate(jas_stream_t *in) { jas_uchar buf[RAS_MAGICLEN]; int i; int n; uint_fast32_t magic; assert(JAS_STREAM_MAXPUTBACK >= RAS_MAGICLEN); /* Read the validation data (i.e., the data used for detecting the format). */ if ((n = jas_stream_read(in, buf, RAS_MAGICLEN)) < 0) { return -1; } /* Put the validation data back onto the stream, so that the stream position will not be changed. */ for (i = n - 1; i >= 0; --i) { if (jas_stream_ungetc(in, buf[i]) == EOF) { return -1; } } /* Did we read enough data? */ if (n < RAS_MAGICLEN) { return -1; } magic = (JAS_CAST(uint_fast32_t, buf[0]) << 24) | (JAS_CAST(uint_fast32_t, buf[1]) << 16) | (JAS_CAST(uint_fast32_t, buf[2]) << 8) | buf[3]; /* Is the signature correct for the Sun Rasterfile format? */ if (magic != RAS_MAGIC) { return -1; } return 0; }
168,729
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void StorageHandler::SetRenderer(RenderProcessHost* process_host, RenderFrameHostImpl* frame_host) { process_ = process_host; } Commit Message: DevTools: speculative fix for crash in NetworkHandler::Disable This keeps BrowserContext* and StoragePartition* instead of RenderProcessHost* in an attemp to resolve UAF of RenderProcessHost upon closure of DevTools front-end. Bug: 801117, 783067, 780694 Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b Reviewed-on: https://chromium-review.googlesource.com/876657 Commit-Queue: Andrey Kosyakov <[email protected]> Reviewed-by: Dmitry Gozman <[email protected]> Cr-Commit-Position: refs/heads/master@{#531157} CWE ID: CWE-20
void StorageHandler::SetRenderer(int process_host_id, RenderFrameHostImpl* frame_host) { RenderProcessHost* process = RenderProcessHost::FromID(process_host_id); storage_partition_ = process ? process->GetStoragePartition() : nullptr; }
172,774
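The commit above stops caching a raw RenderProcessHost* and instead keeps state that is resolved when needed. A toy C sketch of the underlying store-an-id, look-up-on-use pattern (the registry and every name here are invented for illustration):

#include <stddef.h>

typedef struct { int alive; } host_t;

static host_t g_hosts[8];                      /* toy host registry */

/* Returns NULL once the host has been destroyed. */
static host_t *host_from_id(int id)
{
    if (id < 0 || id >= 8 || !g_hosts[id].alive)
        return NULL;
    return &g_hosts[id];
}

struct handler {
    int host_id;                               /* keep the id, never the pointer */
};

static void handler_use(struct handler *h)
{
    host_t *host = host_from_id(h->host_id);   /* resolve at every call */

    if (host == NULL)
        return;                                /* host already gone: no use-after-free */
    /* ... use 'host' here ... */
}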
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) { int len = 0, l; int c; int count = 0; #ifdef DEBUG nbParseNCNameComplex++; #endif /* * Handler for more complex cases */ GROW; c = CUR_CHAR(l); if ((c == ' ') || (c == '>') || (c == '/') || /* accelerators */ (!xmlIsNameStartChar(ctxt, c) || (c == ':'))) { return(NULL); } while ((c != ' ') && (c != '>') && (c != '/') && /* test bigname.xml */ (xmlIsNameChar(ctxt, c) && (c != ':'))) { if (count++ > 100) { count = 0; GROW; } len += l; NEXTL(l); c = CUR_CHAR(l); } return(xmlDictLookup(ctxt->dict, ctxt->input->cur - len, len)); } Commit Message: libxml: XML_PARSER_EOF checks from upstream BUG=229019 TBR=cpu Review URL: https://chromiumcodereview.appspot.com/14053009 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@196804 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-119
xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) { int len = 0, l; int c; int count = 0; #ifdef DEBUG nbParseNCNameComplex++; #endif /* * Handler for more complex cases */ GROW; c = CUR_CHAR(l); if ((c == ' ') || (c == '>') || (c == '/') || /* accelerators */ (!xmlIsNameStartChar(ctxt, c) || (c == ':'))) { return(NULL); } while ((c != ' ') && (c != '>') && (c != '/') && /* test bigname.xml */ (xmlIsNameChar(ctxt, c) && (c != ':'))) { if (count++ > 100) { count = 0; GROW; if (ctxt->instate == XML_PARSER_EOF) return(NULL); } len += l; NEXTL(l); c = CUR_CHAR(l); } return(xmlDictLookup(ctxt->dict, ctxt->input->cur - len, len)); }
171,295
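The fix above re-tests ctxt->instate == XML_PARSER_EOF immediately after GROW inside the scanning loop. A self-contained sketch of the same idea -- re-check the end-of-input flag after every buffer refill before dereferencing the cursor -- built around a hypothetical parser struct and a toy refill:

#include <stddef.h>

typedef struct {
    const char *cur;
    const char *end;
    int         eof;                    /* set once no more input can be read */
} parser_t;

/* Toy refill: a real parser would pull in more bytes; with a fixed input a
   refill request simply marks end-of-input. */
static void refill(parser_t *p)
{
    p->eof = 1;
}

static size_t scan_name(parser_t *p)
{
    size_t len = 0;

    for (;;) {
        if (p->cur == p->end) {
            refill(p);
            if (p->eof)                 /* re-check after every refill */
                break;
            if (p->cur == p->end)       /* refill produced nothing */
                break;
        }
        if (*p->cur == ' ' || *p->cur == '>')
            break;
        p->cur++;
        len++;
    }
    return len;
}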
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void WebPageSerializerImpl::openTagToString(Element* element, SerializeDomParam* param) { bool needSkip; StringBuilder result; result.append(preActionBeforeSerializeOpenTag(element, param, &needSkip)); if (needSkip) return; result.append('<'); result.append(element->nodeName().lower()); AttributeCollection attributes = element->attributes(); AttributeCollection::iterator end = attributes.end(); for (AttributeCollection::iterator it = attributes.begin(); it != end; ++it) { result.append(' '); result.append(it->name().toString()); result.appendLiteral("=\""); if (!it->value().isEmpty()) { const String& attrValue = it->value(); const QualifiedName& attrName = it->name(); if (element->hasLegalLinkAttribute(attrName)) { if (attrValue.startsWith("javascript:", TextCaseInsensitive)) { result.append(attrValue); } else { WebLocalFrameImpl* subFrame = WebLocalFrameImpl::fromFrameOwnerElement(element); String completeURL = subFrame ? subFrame->frame()->document()->url() : param->document->completeURL(attrValue); if (m_localLinks.contains(completeURL)) { if (!param->directoryName.isEmpty()) { result.appendLiteral("./"); result.append(param->directoryName); result.append('/'); } result.append(m_localLinks.get(completeURL)); } else { result.append(completeURL); } } } else { if (param->isHTMLDocument) result.append(m_htmlEntities.convertEntitiesInString(attrValue)); else result.append(m_xmlEntities.convertEntitiesInString(attrValue)); } } result.append('\"'); } String addedContents = postActionAfterSerializeOpenTag(element, param); if (element->hasChildren() || param->haveAddedContentsBeforeEnd) result.append('>'); result.append(addedContents); saveHTMLContentToBuffer(result.toString(), param); } Commit Message: Make WebPageSerializerImpl to escape URL attribute values in result. This patch makes |WebPageSerializerImpl| to escape URL attribute values rather than directly output URL attribute values into result. BUG=542054 TEST=webkit_unit_tests --gtest_filter=WebPageSerializerTest.URLAttributeValues Review URL: https://codereview.chromium.org/1398453005 Cr-Commit-Position: refs/heads/master@{#353712} CWE ID: CWE-20
void WebPageSerializerImpl::openTagToString(Element* element, SerializeDomParam* param) { bool needSkip; StringBuilder result; result.append(preActionBeforeSerializeOpenTag(element, param, &needSkip)); if (needSkip) return; result.append('<'); result.append(element->nodeName().lower()); AttributeCollection attributes = element->attributes(); AttributeCollection::iterator end = attributes.end(); for (AttributeCollection::iterator it = attributes.begin(); it != end; ++it) { result.append(' '); result.append(it->name().toString()); result.appendLiteral("=\""); if (!it->value().isEmpty()) { const String& attrValue = it->value(); const QualifiedName& attrName = it->name(); if (element->hasLegalLinkAttribute(attrName)) { if (attrValue.startsWith("javascript:", TextCaseInsensitive)) { result.append(m_htmlEntities.convertEntitiesInString(attrValue)); } else { WebLocalFrameImpl* subFrame = WebLocalFrameImpl::fromFrameOwnerElement(element); String completeURL = subFrame ? subFrame->frame()->document()->url() : param->document->completeURL(attrValue); if (m_localLinks.contains(completeURL)) { if (!param->directoryName.isEmpty()) { result.appendLiteral("./"); result.append(param->directoryName); result.append('/'); } result.append(m_htmlEntities.convertEntitiesInString(m_localLinks.get(completeURL))); } else { result.append(m_htmlEntities.convertEntitiesInString(completeURL)); } } } else { if (param->isHTMLDocument) result.append(m_htmlEntities.convertEntitiesInString(attrValue)); else result.append(m_xmlEntities.convertEntitiesInString(attrValue)); } } result.append('\"'); } String addedContents = postActionAfterSerializeOpenTag(element, param); if (element->hasChildren() || param->haveAddedContentsBeforeEnd) result.append('>'); result.append(addedContents); saveHTMLContentToBuffer(result.toString(), param); }
171,727
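The fix above passes every URL attribute value through convertEntitiesInString() instead of appending it raw. A stand-alone C sketch of double-quoted attribute-value escaping on output (the helper and its signature are assumptions, not Blink API):

#include <stddef.h>
#include <string.h>

/* Escape the characters that can break out of a double-quoted HTML attribute.
   Returns the number of bytes written, or (size_t)-1 if 'out' is too small. */
static size_t escape_attr(const char *in, char *out, size_t out_size)
{
    size_t used = 0;

    if (out_size == 0)
        return (size_t) -1;
    for (; *in != '\0'; in++) {
        char        one[2] = { *in, '\0' };
        const char *rep;
        size_t      n;

        switch (*in) {
        case '&':  rep = "&amp;";  break;
        case '<':  rep = "&lt;";   break;
        case '>':  rep = "&gt;";   break;
        case '"':  rep = "&quot;"; break;
        default:   rep = one;      break;
        }
        n = strlen(rep);
        if (used + n + 1 > out_size)
            return (size_t) -1;         /* output buffer too small */
        memcpy(out + used, rep, n);
        used += n;
    }
    out[used] = '\0';
    return used;
}

Escaping once at the output boundary, as the patch does for every attribute branch, avoids having to trust each individual call site.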
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static jboolean enableNative(JNIEnv* env, jobject obj) { ALOGV("%s:",__FUNCTION__); jboolean result = JNI_FALSE; if (!sBluetoothInterface) return result; int ret = sBluetoothInterface->enable(); result = (ret == BT_STATUS_SUCCESS || ret == BT_STATUS_DONE) ? JNI_TRUE : JNI_FALSE; return result; } Commit Message: Add guest mode functionality (3/3) Add a flag to enable() to start Bluetooth in restricted mode. In restricted mode, all devices that are paired during restricted mode are deleted upon leaving restricted mode. Right now restricted mode is only entered while a guest user is active. Bug: 27410683 Change-Id: If4a8855faf362d7f6de509d7ddc7197d1ac75cee CWE ID: CWE-20
static jboolean enableNative(JNIEnv* env, jobject obj, jboolean isGuest) { ALOGV("%s:",__FUNCTION__); jboolean result = JNI_FALSE; if (!sBluetoothInterface) return result; int ret = sBluetoothInterface->enable(isGuest == JNI_TRUE ? 1 : 0); result = (ret == BT_STATUS_SUCCESS || ret == BT_STATUS_DONE) ? JNI_TRUE : JNI_FALSE; return result; }
174,161
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) { int r; struct kvm_vcpu *vcpu, *v; vcpu = kvm_arch_vcpu_create(kvm, id); if (IS_ERR(vcpu)) return PTR_ERR(vcpu); preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); r = kvm_arch_vcpu_setup(vcpu); if (r) goto vcpu_destroy; mutex_lock(&kvm->lock); if (!kvm_vcpu_compatible(vcpu)) { r = -EINVAL; goto unlock_vcpu_destroy; } if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { r = -EINVAL; goto unlock_vcpu_destroy; } kvm_for_each_vcpu(r, v, kvm) if (v->vcpu_id == id) { r = -EEXIST; goto unlock_vcpu_destroy; } BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); /* Now it's all set up, let userspace reach it */ kvm_get_kvm(kvm); r = create_vcpu_fd(vcpu); if (r < 0) { kvm_put_kvm(kvm); goto unlock_vcpu_destroy; } kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; smp_wmb(); atomic_inc(&kvm->online_vcpus); mutex_unlock(&kvm->lock); kvm_arch_vcpu_postcreate(vcpu); return r; unlock_vcpu_destroy: mutex_unlock(&kvm->lock); vcpu_destroy: kvm_arch_vcpu_destroy(vcpu); return r; } Commit Message: KVM: Improve create VCPU parameter (CVE-2013-4587) In multiple functions the vcpu_id is used as an offset into a bitfield. Ag malicious user could specify a vcpu_id greater than 255 in order to set or clear bits in kernel memory. This could be used to elevate priveges in the kernel. This patch verifies that the vcpu_id provided is less than 255. The api documentation already specifies that the vcpu_id must be less than max_vcpus, but this is currently not checked. Reported-by: Andrew Honig <[email protected]> Cc: [email protected] Signed-off-by: Andrew Honig <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]> CWE ID: CWE-20
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) { int r; struct kvm_vcpu *vcpu, *v; if (id >= KVM_MAX_VCPUS) return -EINVAL; vcpu = kvm_arch_vcpu_create(kvm, id); if (IS_ERR(vcpu)) return PTR_ERR(vcpu); preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); r = kvm_arch_vcpu_setup(vcpu); if (r) goto vcpu_destroy; mutex_lock(&kvm->lock); if (!kvm_vcpu_compatible(vcpu)) { r = -EINVAL; goto unlock_vcpu_destroy; } if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { r = -EINVAL; goto unlock_vcpu_destroy; } kvm_for_each_vcpu(r, v, kvm) if (v->vcpu_id == id) { r = -EEXIST; goto unlock_vcpu_destroy; } BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); /* Now it's all set up, let userspace reach it */ kvm_get_kvm(kvm); r = create_vcpu_fd(vcpu); if (r < 0) { kvm_put_kvm(kvm); goto unlock_vcpu_destroy; } kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; smp_wmb(); atomic_inc(&kvm->online_vcpus); mutex_unlock(&kvm->lock); kvm_arch_vcpu_postcreate(vcpu); return r; unlock_vcpu_destroy: mutex_unlock(&kvm->lock); vcpu_destroy: kvm_arch_vcpu_destroy(vcpu); return r; }
165,959
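The one-line fix above rejects id >= KVM_MAX_VCPUS before the id is ever used as an offset. A tiny generic sketch of bounds-checking an untrusted index before it becomes a bit offset (the limit and bitmap are assumptions):

#include <errno.h>
#include <stdint.h>

#define MAX_SLOTS 256                   /* assumed upper bound */

static uint8_t slot_bitmap[MAX_SLOTS / 8];

/* Reject out-of-range ids before they are turned into a byte/bit offset;
   otherwise a large id lets the caller flip bits far outside the bitmap. */
static int mark_slot(uint32_t id)
{
    if (id >= MAX_SLOTS)
        return -EINVAL;
    slot_bitmap[id / 8] |= (uint8_t) (1u << (id % 8));
    return 0;
}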
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *inode = filp->f_path.dentry->d_inode; struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFSPLUS_MAX_STRLEN + 1]; hfsplus_cat_entry entry; struct hfs_find_data fd; struct hfsplus_readdir_data *rd; u16 type; if (filp->f_pos >= inode->i_size) return 0; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); if (err) return err; hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); err = hfs_brec_find(&fd); if (err) goto out; switch ((u32)filp->f_pos) { case 0: /* This is completely artificial... */ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) goto out; filp->f_pos++; /* fall through */ case 1: hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) { printk(KERN_ERR "hfs: bad catalog folder thread\n"); err = -EIO; goto out; } if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) { printk(KERN_ERR "hfs: truncated catalog thread\n"); err = -EIO; goto out; } if (filldir(dirent, "..", 2, 1, be32_to_cpu(entry.thread.parentID), DT_DIR)) goto out; filp->f_pos++; /* fall through */ default: if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, filp->f_pos - 1); if (err) goto out; } for (;;) { if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) { printk(KERN_ERR "hfs: walked past end of dir\n"); err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); type = be16_to_cpu(entry.type); len = HFSPLUS_MAX_STRLEN; err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len); if (err) goto out; if (type == HFSPLUS_FOLDER) { if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { printk(KERN_ERR "hfs: small dir entry\n"); err = -EIO; goto out; } if (HFSPLUS_SB(sb)->hidden_dir && HFSPLUS_SB(sb)->hidden_dir->i_ino == be32_to_cpu(entry.folder.id)) goto next; if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.folder.id), DT_DIR)) break; } else if (type == HFSPLUS_FILE) { if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { printk(KERN_ERR "hfs: small file entry\n"); err = -EIO; goto out; } if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.file.id), DT_REG)) break; } else { printk(KERN_ERR "hfs: bad catalog entry type\n"); err = -EIO; goto out; } next: filp->f_pos++; if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } rd = filp->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } filp->private_data = rd; rd->file = filp; list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list); } memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); out: hfs_find_exit(&fd); return err; } Commit Message: hfsplus: Fix potential buffer overflows Commit ec81aecb2966 ("hfs: fix a potential buffer overflow") fixed a few potential buffer overflows in the hfs filesystem. But as Timo Warns pointed out, these changes also need to be made on the hfsplus filesystem as well. 
Reported-by: Timo Warns <[email protected]> Acked-by: WANG Cong <[email protected]> Cc: Alexey Khoroshilov <[email protected]> Cc: Miklos Szeredi <[email protected]> Cc: Sage Weil <[email protected]> Cc: Eugene Teo <[email protected]> Cc: Roman Zippel <[email protected]> Cc: Al Viro <[email protected]> Cc: Christoph Hellwig <[email protected]> Cc: Alexey Dobriyan <[email protected]> Cc: Dave Anderson <[email protected]> Cc: stable <[email protected]> Cc: Andrew Morton <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-264
static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *inode = filp->f_path.dentry->d_inode; struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFSPLUS_MAX_STRLEN + 1]; hfsplus_cat_entry entry; struct hfs_find_data fd; struct hfsplus_readdir_data *rd; u16 type; if (filp->f_pos >= inode->i_size) return 0; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); if (err) return err; hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); err = hfs_brec_find(&fd); if (err) goto out; switch ((u32)filp->f_pos) { case 0: /* This is completely artificial... */ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) goto out; filp->f_pos++; /* fall through */ case 1: if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) { printk(KERN_ERR "hfs: bad catalog folder thread\n"); err = -EIO; goto out; } if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) { printk(KERN_ERR "hfs: truncated catalog thread\n"); err = -EIO; goto out; } if (filldir(dirent, "..", 2, 1, be32_to_cpu(entry.thread.parentID), DT_DIR)) goto out; filp->f_pos++; /* fall through */ default: if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, filp->f_pos - 1); if (err) goto out; } for (;;) { if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) { printk(KERN_ERR "hfs: walked past end of dir\n"); err = -EIO; goto out; } if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); type = be16_to_cpu(entry.type); len = HFSPLUS_MAX_STRLEN; err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len); if (err) goto out; if (type == HFSPLUS_FOLDER) { if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { printk(KERN_ERR "hfs: small dir entry\n"); err = -EIO; goto out; } if (HFSPLUS_SB(sb)->hidden_dir && HFSPLUS_SB(sb)->hidden_dir->i_ino == be32_to_cpu(entry.folder.id)) goto next; if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.folder.id), DT_DIR)) break; } else if (type == HFSPLUS_FILE) { if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { printk(KERN_ERR "hfs: small file entry\n"); err = -EIO; goto out; } if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.file.id), DT_REG)) break; } else { printk(KERN_ERR "hfs: bad catalog entry type\n"); err = -EIO; goto out; } next: filp->f_pos++; if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } rd = filp->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } filp->private_data = rd; rd->file = filp; list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list); } memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); out: hfs_find_exit(&fd); return err; }
165,600
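The hfsplus fix above adds range checks on fd.entrylength before each hfs_bnode_read into a fixed-size entry. Below is a minimal C sketch of the same idea, validating an untrusted on-disk length against the destination size before copying; the names are hypothetical and not from the kernel.

#include <stdint.h>
#include <string.h>

struct entry_buf {
    unsigned char data[64];
};

/* Returns 0 on success, -1 if the record length cannot fit the destination. */
static int read_record(struct entry_buf *dst, const unsigned char *src,
                       int32_t reclen)
{
    if (reclen < 0 || (size_t)reclen > sizeof(dst->data))
        return -1;                 /* untrusted length out of range */
    memcpy(dst->data, src, (size_t)reclen);
    return 0;
}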
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void __init proc_root_init(void) { struct vfsmount *mnt; int err; proc_init_inodecache(); err = register_filesystem(&proc_fs_type); if (err) return; mnt = kern_mount_data(&proc_fs_type, &init_pid_ns); if (IS_ERR(mnt)) { unregister_filesystem(&proc_fs_type); return; } init_pid_ns.proc_mnt = mnt; proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); #ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); #endif proc_mkdir("fs", NULL); proc_mkdir("driver", NULL); proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) /* just give it a mountpoint */ proc_mkdir("openprom", NULL); #endif proc_tty_init(); #ifdef CONFIG_PROC_DEVICETREE proc_device_tree_init(); #endif proc_mkdir("bus", NULL); proc_sys_init(); } Commit Message: procfs: fix a vfsmount longterm reference leak kern_mount() doesn't pair with plain mntput()... Signed-off-by: Al Viro <[email protected]> CWE ID: CWE-119
void __init proc_root_init(void) { int err; proc_init_inodecache(); err = register_filesystem(&proc_fs_type); if (err) return; err = pid_ns_prepare_proc(&init_pid_ns); if (err) { unregister_filesystem(&proc_fs_type); return; } proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); #ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); #endif proc_mkdir("fs", NULL); proc_mkdir("driver", NULL); proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) /* just give it a mountpoint */ proc_mkdir("openprom", NULL); #endif proc_tty_init(); #ifdef CONFIG_PROC_DEVICETREE proc_device_tree_init(); #endif proc_mkdir("bus", NULL); proc_sys_init(); }
165,615
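The procfs fix above replaces the kern_mount_data()/mntput() pairing with pid_ns_prepare_proc(), so the mount is created and torn down by matching helpers. As a loose illustration of that acquire/release pairing discipline only (not of the real VFS API), here is a hypothetical refcounted handle whose release helper is the sole teardown path callers are expected to use.

#include <stdlib.h>

struct handle {
    int refs;
    /* ... resource state ... */
};

/* Acquire: returns a handle with one reference, or NULL on failure. */
static struct handle *handle_get(void)
{
    struct handle *h = calloc(1, sizeof(*h));
    if (h)
        h->refs = 1;
    return h;
}

/* Release: the only correct counterpart to handle_get(). */
static void handle_put(struct handle *h)
{
    if (h && --h->refs == 0)
        free(h);
}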
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: InputMethodDescriptors* ChromeOSGetSupportedInputMethodDescriptors() { InputMethodDescriptors* input_methods = new InputMethodDescriptors; for (size_t i = 0; i < arraysize(chromeos::kIBusEngines); ++i) { if (InputMethodIdIsWhitelisted(chromeos::kIBusEngines[i].name)) { input_methods->push_back(chromeos::CreateInputMethodDescriptor( chromeos::kIBusEngines[i].name, chromeos::kIBusEngines[i].longname, chromeos::kIBusEngines[i].layout, chromeos::kIBusEngines[i].language)); } } return input_methods; } Commit Message: Remove use of libcros from InputMethodLibrary. BUG=chromium-os:16238 TEST==confirm that input methods work as before on the netbook. Also confirm that the chrome builds and works on the desktop as before. Review URL: http://codereview.chromium.org/7003086 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
InputMethodDescriptors* ChromeOSGetSupportedInputMethodDescriptors() { virtual void RemoveObserver(Observer* observer) { }
170,523
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: download::DownloadInterruptReason DownloadManagerImpl::BeginDownloadRequest( std::unique_ptr<net::URLRequest> url_request, ResourceContext* resource_context, download::DownloadUrlParameters* params) { if (ResourceDispatcherHostImpl::Get()->is_shutdown()) return download::DOWNLOAD_INTERRUPT_REASON_USER_SHUTDOWN; ResourceDispatcherHostImpl::Get()->InitializeURLRequest( url_request.get(), Referrer(params->referrer(), Referrer::NetReferrerPolicyToBlinkReferrerPolicy( params->referrer_policy())), true, // download. params->render_process_host_id(), params->render_view_host_routing_id(), params->render_frame_host_routing_id(), PREVIEWS_OFF, resource_context); url_request->set_first_party_url_policy( net::URLRequest::UPDATE_FIRST_PARTY_URL_ON_REDIRECT); const GURL& url = url_request->original_url(); const net::URLRequestContext* request_context = url_request->context(); if (!request_context->job_factory()->IsHandledProtocol(url.scheme())) { DVLOG(1) << "Download request for unsupported protocol: " << url.possibly_invalid_spec(); return download::DOWNLOAD_INTERRUPT_REASON_NETWORK_INVALID_REQUEST; } std::unique_ptr<ResourceHandler> handler( DownloadResourceHandler::CreateForNewRequest( url_request.get(), params->request_origin(), params->download_source(), params->follow_cross_origin_redirects())); ResourceDispatcherHostImpl::Get()->BeginURLRequest( std::move(url_request), std::move(handler), true, // download params->content_initiated(), params->do_not_prompt_for_login(), resource_context); return download::DOWNLOAD_INTERRUPT_REASON_NONE; } Commit Message: When turning a download into a navigation, navigate the right frame Code changes from Nate Chapin <[email protected]> Bug: 926105 Change-Id: I098599394e6ebe7d2fce5af838014297a337d294 Reviewed-on: https://chromium-review.googlesource.com/c/1454962 Reviewed-by: Camille Lamy <[email protected]> Commit-Queue: Jochen Eisinger <[email protected]> Cr-Commit-Position: refs/heads/master@{#629547} CWE ID: CWE-284
download::DownloadInterruptReason DownloadManagerImpl::BeginDownloadRequest( std::unique_ptr<net::URLRequest> url_request, ResourceContext* resource_context, download::DownloadUrlParameters* params) { if (ResourceDispatcherHostImpl::Get()->is_shutdown()) return download::DOWNLOAD_INTERRUPT_REASON_USER_SHUTDOWN; ResourceDispatcherHostImpl::Get()->InitializeURLRequest( url_request.get(), Referrer(params->referrer(), Referrer::NetReferrerPolicyToBlinkReferrerPolicy( params->referrer_policy())), true, // download. params->render_process_host_id(), params->render_view_host_routing_id(), params->render_frame_host_routing_id(), params->frame_tree_node_id(), PREVIEWS_OFF, resource_context); url_request->set_first_party_url_policy( net::URLRequest::UPDATE_FIRST_PARTY_URL_ON_REDIRECT); const GURL& url = url_request->original_url(); const net::URLRequestContext* request_context = url_request->context(); if (!request_context->job_factory()->IsHandledProtocol(url.scheme())) { DVLOG(1) << "Download request for unsupported protocol: " << url.possibly_invalid_spec(); return download::DOWNLOAD_INTERRUPT_REASON_NETWORK_INVALID_REQUEST; } std::unique_ptr<ResourceHandler> handler( DownloadResourceHandler::CreateForNewRequest( url_request.get(), params->request_origin(), params->download_source(), params->follow_cross_origin_redirects())); ResourceDispatcherHostImpl::Get()->BeginURLRequest( std::move(url_request), std::move(handler), true, // download params->content_initiated(), params->do_not_prompt_for_login(), resource_context); return download::DOWNLOAD_INTERRUPT_REASON_NONE; }
173,021
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: char *path_name(struct strbuf *path, const char *name) { struct strbuf ret = STRBUF_INIT; if (path) strbuf_addbuf(&ret, path); strbuf_addstr(&ret, name); return strbuf_detach(&ret, NULL); } Commit Message: list-objects: pass full pathname to callbacks When we find a blob at "a/b/c", we currently pass this to our show_object_fn callbacks as two components: "a/b/" and "c". Callbacks which want the full value then call path_name(), which concatenates the two. But this is an inefficient interface; the path is a strbuf, and we could simply append "c" to it temporarily, then roll back the length, without creating a new copy. So we could improve this by teaching the callsites of path_name() this trick (and there are only 3). But we can also notice that no callback actually cares about the broken-down representation, and simply pass each callback the full path "a/b/c" as a string. The callback code becomes even simpler, then, as we do not have to worry about freeing an allocated buffer, nor rolling back our modification to the strbuf. This is theoretically less efficient, as some callbacks would not bother to format the final path component. But in practice this is not measurable. Since we use the same strbuf over and over, our work to grow it is amortized, and we really only pay to memcpy a few bytes. Signed-off-by: Jeff King <[email protected]> Signed-off-by: Junio C Hamano <[email protected]> CWE ID: CWE-119
char *path_name(struct strbuf *path, const char *name) void show_object_with_name(FILE *out, struct object *obj, const char *name) {
167,426
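The git commit message above describes appending the final path component to one reusable buffer and rolling its length back afterwards, instead of allocating a fresh string per object. Here is a small stand-alone C sketch of that append/roll-back trick, using a hypothetical fixed-size pathbuf rather than git's real strbuf API.

#include <string.h>
#include <stddef.h>

struct pathbuf {
    char buf[4096];   /* holds the directory prefix, NUL-terminated at len */
    size_t len;
};

static void with_full_path(struct pathbuf *p, const char *name,
                           void (*cb)(const char *path))
{
    size_t saved = p->len;
    size_t nlen = strlen(name);

    if (p->len + nlen + 1 > sizeof(p->buf))
        return;                               /* would not fit; skip */
    memcpy(p->buf + p->len, name, nlen + 1);  /* append incl. NUL */
    p->len += nlen;
    cb(p->buf);                               /* callback sees "a/b/c" */
    p->len = saved;                           /* roll back for the next entry */
    p->buf[p->len] = '\0';
}

Because the same buffer is reused for every entry, the growth work is amortized and the callback still receives the full concatenated path.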
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: validate_event(struct pmu_hw_events *hw_events, struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu *leader_pmu = event->group_leader->pmu; if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) return 1; if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; return armpmu->get_event_idx(hw_events, event) >= 0; } Commit Message: ARM: 7809/1: perf: fix event validation for software group leaders It is possible to construct an event group with a software event as a group leader and then subsequently add a hardware event to the group. This results in the event group being validated by adding all members of the group to a fake PMU and attempting to allocate each event on their respective PMU. Unfortunately, for software events wthout a corresponding arm_pmu, this results in a kernel crash attempting to dereference the ->get_event_idx function pointer. This patch fixes the problem by checking explicitly for software events and ignoring those in event validation (since they can always be scheduled). We will probably want to revisit this for 3.12, since the validation checks don't appear to work correctly when dealing with multiple hardware PMUs anyway. Cc: <[email protected]> Reported-by: Vince Weaver <[email protected]> Tested-by: Vince Weaver <[email protected]> Tested-by: Mark Rutland <[email protected]> Signed-off-by: Will Deacon <[email protected]> Signed-off-by: Russell King <[email protected]> CWE ID: CWE-20
validate_event(struct pmu_hw_events *hw_events, struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu *leader_pmu = event->group_leader->pmu; if (is_software_event(event)) return 1; if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) return 1; if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; return armpmu->get_event_idx(hw_events, event) >= 0; }
166,009
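The ARM perf fix above returns early for software events so that armpmu->get_event_idx is never reached through a missing backend. A minimal sketch of guarding an optional ops table before calling through it follows; the struct and function names are made up for illustration.

struct backend_ops {
    int (*get_slot)(void *ctx);
};

struct event {
    const struct backend_ops *ops;  /* NULL for "software" events */
    void *ctx;
};

/* Returns nonzero if the event can be scheduled. */
static int event_schedulable(const struct event *ev)
{
    if (!ev->ops)
        return 1;                   /* no hardware backend: always fits */
    return ev->ops->get_slot(ev->ctx) >= 0;
}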
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: read_header(struct archive_read *a, struct archive_entry *entry, char head_type) { const void *h; const char *p, *endp; struct rar *rar; struct rar_header rar_header; struct rar_file_header file_header; int64_t header_size; unsigned filename_size, end; char *filename; char *strp; char packed_size[8]; char unp_size[8]; int ttime; struct archive_string_conv *sconv, *fn_sconv; unsigned long crc32_val; int ret = (ARCHIVE_OK), ret2; rar = (struct rar *)(a->format->data); /* Setup a string conversion object for non-rar-unicode filenames. */ sconv = rar->opt_sconv; if (sconv == NULL) { if (!rar->init_default_conversion) { rar->sconv_default = archive_string_default_conversion_for_read( &(a->archive)); rar->init_default_conversion = 1; } sconv = rar->sconv_default; } if ((h = __archive_read_ahead(a, 7, NULL)) == NULL) return (ARCHIVE_FATAL); p = h; memcpy(&rar_header, p, sizeof(rar_header)); rar->file_flags = archive_le16dec(rar_header.flags); header_size = archive_le16dec(rar_header.size); if (header_size < (int64_t)sizeof(file_header) + 7) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid header size"); return (ARCHIVE_FATAL); } crc32_val = crc32(0, (const unsigned char *)p + 2, 7 - 2); __archive_read_consume(a, 7); if (!(rar->file_flags & FHD_SOLID)) { rar->compression_method = 0; rar->packed_size = 0; rar->unp_size = 0; rar->mtime = 0; rar->ctime = 0; rar->atime = 0; rar->arctime = 0; rar->mode = 0; memset(&rar->salt, 0, sizeof(rar->salt)); rar->atime = 0; rar->ansec = 0; rar->ctime = 0; rar->cnsec = 0; rar->mtime = 0; rar->mnsec = 0; rar->arctime = 0; rar->arcnsec = 0; } else { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "RAR solid archive support unavailable."); return (ARCHIVE_FATAL); } if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL) return (ARCHIVE_FATAL); /* File Header CRC check. */ crc32_val = crc32(crc32_val, h, (unsigned)(header_size - 7)); if ((crc32_val & 0xffff) != archive_le16dec(rar_header.crc)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Header CRC error"); return (ARCHIVE_FATAL); } /* If no CRC error, Go on parsing File Header. */ p = h; endp = p + header_size - 7; memcpy(&file_header, p, sizeof(file_header)); p += sizeof(file_header); rar->compression_method = file_header.method; ttime = archive_le32dec(file_header.file_time); rar->mtime = get_time(ttime); rar->file_crc = archive_le32dec(file_header.file_crc); if (rar->file_flags & FHD_PASSWORD) { archive_entry_set_is_data_encrypted(entry, 1); rar->has_encrypted_entries = 1; archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "RAR encryption support unavailable."); /* Since it is only the data part itself that is encrypted we can at least extract information about the currently processed entry and don't need to return ARCHIVE_FATAL here. 
*/ /*return (ARCHIVE_FATAL);*/ } if (rar->file_flags & FHD_LARGE) { memcpy(packed_size, file_header.pack_size, 4); memcpy(packed_size + 4, p, 4); /* High pack size */ p += 4; memcpy(unp_size, file_header.unp_size, 4); memcpy(unp_size + 4, p, 4); /* High unpack size */ p += 4; rar->packed_size = archive_le64dec(&packed_size); rar->unp_size = archive_le64dec(&unp_size); } else { rar->packed_size = archive_le32dec(file_header.pack_size); rar->unp_size = archive_le32dec(file_header.unp_size); } if (rar->packed_size < 0 || rar->unp_size < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid sizes specified."); return (ARCHIVE_FATAL); } rar->bytes_remaining = rar->packed_size; /* TODO: RARv3 subblocks contain comments. For now the complete block is * consumed at the end. */ if (head_type == NEWSUB_HEAD) { size_t distance = p - (const char *)h; header_size += rar->packed_size; /* Make sure we have the extended data. */ if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL) return (ARCHIVE_FATAL); p = h; endp = p + header_size - 7; p += distance; } filename_size = archive_le16dec(file_header.name_size); if (p + filename_size > endp) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid filename size"); return (ARCHIVE_FATAL); } if (rar->filename_allocated < filename_size * 2 + 2) { char *newptr; size_t newsize = filename_size * 2 + 2; newptr = realloc(rar->filename, newsize); if (newptr == NULL) { archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory."); return (ARCHIVE_FATAL); } rar->filename = newptr; rar->filename_allocated = newsize; } filename = rar->filename; memcpy(filename, p, filename_size); filename[filename_size] = '\0'; if (rar->file_flags & FHD_UNICODE) { if (filename_size != strlen(filename)) { unsigned char highbyte, flagbits, flagbyte; unsigned fn_end, offset; end = filename_size; fn_end = filename_size * 2; filename_size = 0; offset = (unsigned)strlen(filename) + 1; highbyte = *(p + offset++); flagbits = 0; flagbyte = 0; while (offset < end && filename_size < fn_end) { if (!flagbits) { flagbyte = *(p + offset++); flagbits = 8; } flagbits -= 2; switch((flagbyte >> flagbits) & 3) { case 0: filename[filename_size++] = '\0'; filename[filename_size++] = *(p + offset++); break; case 1: filename[filename_size++] = highbyte; filename[filename_size++] = *(p + offset++); break; case 2: filename[filename_size++] = *(p + offset + 1); filename[filename_size++] = *(p + offset); offset += 2; break; case 3: { char extra, high; uint8_t length = *(p + offset++); if (length & 0x80) { extra = *(p + offset++); high = (char)highbyte; } else extra = high = 0; length = (length & 0x7f) + 2; while (length && filename_size < fn_end) { unsigned cp = filename_size >> 1; filename[filename_size++] = high; filename[filename_size++] = p[cp] + extra; length--; } } break; } } if (filename_size > fn_end) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid filename"); return (ARCHIVE_FATAL); } filename[filename_size++] = '\0'; filename[filename_size++] = '\0'; /* Decoded unicode form is UTF-16BE, so we have to update a string * conversion object for it. 
*/ if (rar->sconv_utf16be == NULL) { rar->sconv_utf16be = archive_string_conversion_from_charset( &a->archive, "UTF-16BE", 1); if (rar->sconv_utf16be == NULL) return (ARCHIVE_FATAL); } fn_sconv = rar->sconv_utf16be; strp = filename; while (memcmp(strp, "\x00\x00", 2)) { if (!memcmp(strp, "\x00\\", 2)) *(strp + 1) = '/'; strp += 2; } p += offset; } else { /* * If FHD_UNICODE is set but no unicode data, this file name form * is UTF-8, so we have to update a string conversion object for * it accordingly. */ if (rar->sconv_utf8 == NULL) { rar->sconv_utf8 = archive_string_conversion_from_charset( &a->archive, "UTF-8", 1); if (rar->sconv_utf8 == NULL) return (ARCHIVE_FATAL); } fn_sconv = rar->sconv_utf8; while ((strp = strchr(filename, '\\')) != NULL) *strp = '/'; p += filename_size; } } else { fn_sconv = sconv; while ((strp = strchr(filename, '\\')) != NULL) *strp = '/'; p += filename_size; } /* Split file in multivolume RAR. No more need to process header. */ if (rar->filename_save && filename_size == rar->filename_save_size && !memcmp(rar->filename, rar->filename_save, filename_size + 1)) { __archive_read_consume(a, header_size - 7); rar->cursor++; if (rar->cursor >= rar->nodes) { rar->nodes++; if ((rar->dbo = realloc(rar->dbo, sizeof(*rar->dbo) * rar->nodes)) == NULL) { archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory."); return (ARCHIVE_FATAL); } rar->dbo[rar->cursor].header_size = header_size; rar->dbo[rar->cursor].start_offset = -1; rar->dbo[rar->cursor].end_offset = -1; } if (rar->dbo[rar->cursor].start_offset < 0) { rar->dbo[rar->cursor].start_offset = a->filter->position; rar->dbo[rar->cursor].end_offset = rar->dbo[rar->cursor].start_offset + rar->packed_size; } return ret; } rar->filename_save = (char*)realloc(rar->filename_save, filename_size + 1); memcpy(rar->filename_save, rar->filename, filename_size + 1); rar->filename_save_size = filename_size; /* Set info for seeking */ free(rar->dbo); if ((rar->dbo = calloc(1, sizeof(*rar->dbo))) == NULL) { archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory."); return (ARCHIVE_FATAL); } rar->dbo[0].header_size = header_size; rar->dbo[0].start_offset = -1; rar->dbo[0].end_offset = -1; rar->cursor = 0; rar->nodes = 1; if (rar->file_flags & FHD_SALT) { if (p + 8 > endp) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid header size"); return (ARCHIVE_FATAL); } memcpy(rar->salt, p, 8); p += 8; } if (rar->file_flags & FHD_EXTTIME) { if (read_exttime(p, rar, endp) < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid header size"); return (ARCHIVE_FATAL); } } __archive_read_consume(a, header_size - 7); rar->dbo[0].start_offset = a->filter->position; rar->dbo[0].end_offset = rar->dbo[0].start_offset + rar->packed_size; switch(file_header.host_os) { case OS_MSDOS: case OS_OS2: case OS_WIN32: rar->mode = archive_le32dec(file_header.file_attr); if (rar->mode & FILE_ATTRIBUTE_DIRECTORY) rar->mode = AE_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH; else rar->mode = AE_IFREG; rar->mode |= S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; break; case OS_UNIX: case OS_MAC_OS: case OS_BEOS: rar->mode = archive_le32dec(file_header.file_attr); break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unknown file attributes from RAR file's host OS"); return (ARCHIVE_FATAL); } rar->bytes_uncopied = rar->bytes_unconsumed = 0; rar->lzss.position = rar->offset = 0; rar->offset_seek = 0; rar->dictionary_size = 0; rar->offset_outgoing = 0; rar->br.cache_avail = 0; rar->br.avail_in = 0; 
rar->crc_calculated = 0; rar->entry_eof = 0; rar->valid = 1; rar->is_ppmd_block = 0; rar->start_new_table = 1; free(rar->unp_buffer); rar->unp_buffer = NULL; rar->unp_offset = 0; rar->unp_buffer_size = UNP_BUFFER_SIZE; memset(rar->lengthtable, 0, sizeof(rar->lengthtable)); __archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context, &g_szalloc); rar->ppmd_valid = rar->ppmd_eod = 0; /* Don't set any archive entries for non-file header types */ if (head_type == NEWSUB_HEAD) return ret; archive_entry_set_mtime(entry, rar->mtime, rar->mnsec); archive_entry_set_ctime(entry, rar->ctime, rar->cnsec); archive_entry_set_atime(entry, rar->atime, rar->ansec); archive_entry_set_size(entry, rar->unp_size); archive_entry_set_mode(entry, rar->mode); if (archive_entry_copy_pathname_l(entry, filename, filename_size, fn_sconv)) { if (errno == ENOMEM) { archive_set_error(&a->archive, ENOMEM, "Can't allocate memory for Pathname"); return (ARCHIVE_FATAL); } archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Pathname cannot be converted from %s to current locale.", archive_string_conversion_charset_name(fn_sconv)); ret = (ARCHIVE_WARN); } if (((rar->mode) & AE_IFMT) == AE_IFLNK) { /* Make sure a symbolic-link file does not have its body. */ rar->bytes_remaining = 0; archive_entry_set_size(entry, 0); /* Read a symbolic-link name. */ if ((ret2 = read_symlink_stored(a, entry, sconv)) < (ARCHIVE_WARN)) return ret2; if (ret > ret2) ret = ret2; } if (rar->bytes_remaining == 0) rar->entry_eof = 1; return ret; } Commit Message: Avoid a read off-by-one error for UTF16 names in RAR archives. Reported-By: OSS-Fuzz issue 573 CWE ID: CWE-125
read_header(struct archive_read *a, struct archive_entry *entry, char head_type) { const void *h; const char *p, *endp; struct rar *rar; struct rar_header rar_header; struct rar_file_header file_header; int64_t header_size; unsigned filename_size, end; char *filename; char *strp; char packed_size[8]; char unp_size[8]; int ttime; struct archive_string_conv *sconv, *fn_sconv; unsigned long crc32_val; int ret = (ARCHIVE_OK), ret2; rar = (struct rar *)(a->format->data); /* Setup a string conversion object for non-rar-unicode filenames. */ sconv = rar->opt_sconv; if (sconv == NULL) { if (!rar->init_default_conversion) { rar->sconv_default = archive_string_default_conversion_for_read( &(a->archive)); rar->init_default_conversion = 1; } sconv = rar->sconv_default; } if ((h = __archive_read_ahead(a, 7, NULL)) == NULL) return (ARCHIVE_FATAL); p = h; memcpy(&rar_header, p, sizeof(rar_header)); rar->file_flags = archive_le16dec(rar_header.flags); header_size = archive_le16dec(rar_header.size); if (header_size < (int64_t)sizeof(file_header) + 7) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid header size"); return (ARCHIVE_FATAL); } crc32_val = crc32(0, (const unsigned char *)p + 2, 7 - 2); __archive_read_consume(a, 7); if (!(rar->file_flags & FHD_SOLID)) { rar->compression_method = 0; rar->packed_size = 0; rar->unp_size = 0; rar->mtime = 0; rar->ctime = 0; rar->atime = 0; rar->arctime = 0; rar->mode = 0; memset(&rar->salt, 0, sizeof(rar->salt)); rar->atime = 0; rar->ansec = 0; rar->ctime = 0; rar->cnsec = 0; rar->mtime = 0; rar->mnsec = 0; rar->arctime = 0; rar->arcnsec = 0; } else { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "RAR solid archive support unavailable."); return (ARCHIVE_FATAL); } if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL) return (ARCHIVE_FATAL); /* File Header CRC check. */ crc32_val = crc32(crc32_val, h, (unsigned)(header_size - 7)); if ((crc32_val & 0xffff) != archive_le16dec(rar_header.crc)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Header CRC error"); return (ARCHIVE_FATAL); } /* If no CRC error, Go on parsing File Header. */ p = h; endp = p + header_size - 7; memcpy(&file_header, p, sizeof(file_header)); p += sizeof(file_header); rar->compression_method = file_header.method; ttime = archive_le32dec(file_header.file_time); rar->mtime = get_time(ttime); rar->file_crc = archive_le32dec(file_header.file_crc); if (rar->file_flags & FHD_PASSWORD) { archive_entry_set_is_data_encrypted(entry, 1); rar->has_encrypted_entries = 1; archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "RAR encryption support unavailable."); /* Since it is only the data part itself that is encrypted we can at least extract information about the currently processed entry and don't need to return ARCHIVE_FATAL here. 
*/ /*return (ARCHIVE_FATAL);*/ } if (rar->file_flags & FHD_LARGE) { memcpy(packed_size, file_header.pack_size, 4); memcpy(packed_size + 4, p, 4); /* High pack size */ p += 4; memcpy(unp_size, file_header.unp_size, 4); memcpy(unp_size + 4, p, 4); /* High unpack size */ p += 4; rar->packed_size = archive_le64dec(&packed_size); rar->unp_size = archive_le64dec(&unp_size); } else { rar->packed_size = archive_le32dec(file_header.pack_size); rar->unp_size = archive_le32dec(file_header.unp_size); } if (rar->packed_size < 0 || rar->unp_size < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid sizes specified."); return (ARCHIVE_FATAL); } rar->bytes_remaining = rar->packed_size; /* TODO: RARv3 subblocks contain comments. For now the complete block is * consumed at the end. */ if (head_type == NEWSUB_HEAD) { size_t distance = p - (const char *)h; header_size += rar->packed_size; /* Make sure we have the extended data. */ if ((h = __archive_read_ahead(a, (size_t)header_size - 7, NULL)) == NULL) return (ARCHIVE_FATAL); p = h; endp = p + header_size - 7; p += distance; } filename_size = archive_le16dec(file_header.name_size); if (p + filename_size > endp) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid filename size"); return (ARCHIVE_FATAL); } if (rar->filename_allocated < filename_size * 2 + 2) { char *newptr; size_t newsize = filename_size * 2 + 2; newptr = realloc(rar->filename, newsize); if (newptr == NULL) { archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory."); return (ARCHIVE_FATAL); } rar->filename = newptr; rar->filename_allocated = newsize; } filename = rar->filename; memcpy(filename, p, filename_size); filename[filename_size] = '\0'; if (rar->file_flags & FHD_UNICODE) { if (filename_size != strlen(filename)) { unsigned char highbyte, flagbits, flagbyte; unsigned fn_end, offset; end = filename_size; fn_end = filename_size * 2; filename_size = 0; offset = (unsigned)strlen(filename) + 1; highbyte = *(p + offset++); flagbits = 0; flagbyte = 0; while (offset < end && filename_size < fn_end) { if (!flagbits) { flagbyte = *(p + offset++); flagbits = 8; } flagbits -= 2; switch((flagbyte >> flagbits) & 3) { case 0: filename[filename_size++] = '\0'; filename[filename_size++] = *(p + offset++); break; case 1: filename[filename_size++] = highbyte; filename[filename_size++] = *(p + offset++); break; case 2: filename[filename_size++] = *(p + offset + 1); filename[filename_size++] = *(p + offset); offset += 2; break; case 3: { char extra, high; uint8_t length = *(p + offset++); if (length & 0x80) { extra = *(p + offset++); high = (char)highbyte; } else extra = high = 0; length = (length & 0x7f) + 2; while (length && filename_size < fn_end) { unsigned cp = filename_size >> 1; filename[filename_size++] = high; filename[filename_size++] = p[cp] + extra; length--; } } break; } } if (filename_size > fn_end) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid filename"); return (ARCHIVE_FATAL); } filename[filename_size++] = '\0'; /* * Do not increment filename_size here as the computations below * add the space for the terminating NUL explicitly. */ filename[filename_size] = '\0'; /* Decoded unicode form is UTF-16BE, so we have to update a string * conversion object for it. 
*/ if (rar->sconv_utf16be == NULL) { rar->sconv_utf16be = archive_string_conversion_from_charset( &a->archive, "UTF-16BE", 1); if (rar->sconv_utf16be == NULL) return (ARCHIVE_FATAL); } fn_sconv = rar->sconv_utf16be; strp = filename; while (memcmp(strp, "\x00\x00", 2)) { if (!memcmp(strp, "\x00\\", 2)) *(strp + 1) = '/'; strp += 2; } p += offset; } else { /* * If FHD_UNICODE is set but no unicode data, this file name form * is UTF-8, so we have to update a string conversion object for * it accordingly. */ if (rar->sconv_utf8 == NULL) { rar->sconv_utf8 = archive_string_conversion_from_charset( &a->archive, "UTF-8", 1); if (rar->sconv_utf8 == NULL) return (ARCHIVE_FATAL); } fn_sconv = rar->sconv_utf8; while ((strp = strchr(filename, '\\')) != NULL) *strp = '/'; p += filename_size; } } else { fn_sconv = sconv; while ((strp = strchr(filename, '\\')) != NULL) *strp = '/'; p += filename_size; } /* Split file in multivolume RAR. No more need to process header. */ if (rar->filename_save && filename_size == rar->filename_save_size && !memcmp(rar->filename, rar->filename_save, filename_size + 1)) { __archive_read_consume(a, header_size - 7); rar->cursor++; if (rar->cursor >= rar->nodes) { rar->nodes++; if ((rar->dbo = realloc(rar->dbo, sizeof(*rar->dbo) * rar->nodes)) == NULL) { archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory."); return (ARCHIVE_FATAL); } rar->dbo[rar->cursor].header_size = header_size; rar->dbo[rar->cursor].start_offset = -1; rar->dbo[rar->cursor].end_offset = -1; } if (rar->dbo[rar->cursor].start_offset < 0) { rar->dbo[rar->cursor].start_offset = a->filter->position; rar->dbo[rar->cursor].end_offset = rar->dbo[rar->cursor].start_offset + rar->packed_size; } return ret; } rar->filename_save = (char*)realloc(rar->filename_save, filename_size + 1); memcpy(rar->filename_save, rar->filename, filename_size + 1); rar->filename_save_size = filename_size; /* Set info for seeking */ free(rar->dbo); if ((rar->dbo = calloc(1, sizeof(*rar->dbo))) == NULL) { archive_set_error(&a->archive, ENOMEM, "Couldn't allocate memory."); return (ARCHIVE_FATAL); } rar->dbo[0].header_size = header_size; rar->dbo[0].start_offset = -1; rar->dbo[0].end_offset = -1; rar->cursor = 0; rar->nodes = 1; if (rar->file_flags & FHD_SALT) { if (p + 8 > endp) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid header size"); return (ARCHIVE_FATAL); } memcpy(rar->salt, p, 8); p += 8; } if (rar->file_flags & FHD_EXTTIME) { if (read_exttime(p, rar, endp) < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid header size"); return (ARCHIVE_FATAL); } } __archive_read_consume(a, header_size - 7); rar->dbo[0].start_offset = a->filter->position; rar->dbo[0].end_offset = rar->dbo[0].start_offset + rar->packed_size; switch(file_header.host_os) { case OS_MSDOS: case OS_OS2: case OS_WIN32: rar->mode = archive_le32dec(file_header.file_attr); if (rar->mode & FILE_ATTRIBUTE_DIRECTORY) rar->mode = AE_IFDIR | S_IXUSR | S_IXGRP | S_IXOTH; else rar->mode = AE_IFREG; rar->mode |= S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; break; case OS_UNIX: case OS_MAC_OS: case OS_BEOS: rar->mode = archive_le32dec(file_header.file_attr); break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Unknown file attributes from RAR file's host OS"); return (ARCHIVE_FATAL); } rar->bytes_uncopied = rar->bytes_unconsumed = 0; rar->lzss.position = rar->offset = 0; rar->offset_seek = 0; rar->dictionary_size = 0; rar->offset_outgoing = 0; rar->br.cache_avail = 0; rar->br.avail_in = 0; 
rar->crc_calculated = 0; rar->entry_eof = 0; rar->valid = 1; rar->is_ppmd_block = 0; rar->start_new_table = 1; free(rar->unp_buffer); rar->unp_buffer = NULL; rar->unp_offset = 0; rar->unp_buffer_size = UNP_BUFFER_SIZE; memset(rar->lengthtable, 0, sizeof(rar->lengthtable)); __archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context, &g_szalloc); rar->ppmd_valid = rar->ppmd_eod = 0; /* Don't set any archive entries for non-file header types */ if (head_type == NEWSUB_HEAD) return ret; archive_entry_set_mtime(entry, rar->mtime, rar->mnsec); archive_entry_set_ctime(entry, rar->ctime, rar->cnsec); archive_entry_set_atime(entry, rar->atime, rar->ansec); archive_entry_set_size(entry, rar->unp_size); archive_entry_set_mode(entry, rar->mode); if (archive_entry_copy_pathname_l(entry, filename, filename_size, fn_sconv)) { if (errno == ENOMEM) { archive_set_error(&a->archive, ENOMEM, "Can't allocate memory for Pathname"); return (ARCHIVE_FATAL); } archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Pathname cannot be converted from %s to current locale.", archive_string_conversion_charset_name(fn_sconv)); ret = (ARCHIVE_WARN); } if (((rar->mode) & AE_IFMT) == AE_IFLNK) { /* Make sure a symbolic-link file does not have its body. */ rar->bytes_remaining = 0; archive_entry_set_size(entry, 0); /* Read a symbolic-link name. */ if ((ret2 = read_symlink_stored(a, entry, sconv)) < (ARCHIVE_WARN)) return ret2; if (ret > ret2) ret = ret2; } if (rar->bytes_remaining == 0) rar->entry_eof = 1; return ret; }
167,754
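The RAR reader fix above stops counting the second terminator byte in filename_size, keeping every write inside the filename_size * 2 + 2 allocation. Below is a small sketch of the same terminator-accounting idea with hypothetical names; it also guards the size computation itself, which the sketch needs to stay safe on its own.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Returns a buffer holding 2 * nunits payload bytes plus a 2-byte terminator,
 * or NULL on failure. *out_len receives the payload length only. */
static unsigned char *copy_utf16(const unsigned char *src, size_t nunits,
                                 size_t *out_len)
{
    unsigned char *dst;
    size_t payload;

    if (nunits > (SIZE_MAX - 2) / 2)
        return NULL;                    /* 2 * nunits + 2 would wrap */
    payload = nunits * 2;
    dst = malloc(payload + 2);
    if (!dst)
        return NULL;
    memcpy(dst, src, payload);
    dst[payload] = 0;                   /* terminator bytes are written... */
    dst[payload + 1] = 0;               /* ...but not counted in *out_len */
    *out_len = payload;
    return dst;
}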
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int URI_FUNC(ComposeQueryEngine)(URI_CHAR * dest, const URI_TYPE(QueryList) * queryList, int maxChars, int * charsWritten, int * charsRequired, UriBool spaceToPlus, UriBool normalizeBreaks) { UriBool firstItem = URI_TRUE; int ampersandLen = 0; /* increased to 1 from second item on */ URI_CHAR * write = dest; /* Subtract terminator */ if (dest == NULL) { *charsRequired = 0; } else { maxChars--; } while (queryList != NULL) { const URI_CHAR * const key = queryList->key; const URI_CHAR * const value = queryList->value; const int worstCase = (normalizeBreaks == URI_TRUE ? 6 : 3); const int keyLen = (key == NULL) ? 0 : (int)URI_STRLEN(key); const int keyRequiredChars = worstCase * keyLen; const int valueLen = (value == NULL) ? 0 : (int)URI_STRLEN(value); const int valueRequiredChars = worstCase * valueLen; if (dest == NULL) { (*charsRequired) += ampersandLen + keyRequiredChars + ((value == NULL) ? 0 : 1 + valueRequiredChars); if (firstItem == URI_TRUE) { ampersandLen = 1; firstItem = URI_FALSE; } } else { if ((write - dest) + ampersandLen + keyRequiredChars > maxChars) { return URI_ERROR_OUTPUT_TOO_LARGE; } /* Copy key */ if (firstItem == URI_TRUE) { ampersandLen = 1; firstItem = URI_FALSE; } else { write[0] = _UT('&'); write++; } write = URI_FUNC(EscapeEx)(key, key + keyLen, write, spaceToPlus, normalizeBreaks); if (value != NULL) { if ((write - dest) + 1 + valueRequiredChars > maxChars) { return URI_ERROR_OUTPUT_TOO_LARGE; } /* Copy value */ write[0] = _UT('='); write++; write = URI_FUNC(EscapeEx)(value, value + valueLen, write, spaceToPlus, normalizeBreaks); } } queryList = queryList->next; } if (dest != NULL) { write[0] = _UT('\0'); if (charsWritten != NULL) { *charsWritten = (int)(write - dest) + 1; /* .. for terminator */ } } return URI_SUCCESS; } Commit Message: UriQuery.c: Catch integer overflow in ComposeQuery and ...Ex CWE ID: CWE-190
int URI_FUNC(ComposeQueryEngine)(URI_CHAR * dest, const URI_TYPE(QueryList) * queryList, int maxChars, int * charsWritten, int * charsRequired, UriBool spaceToPlus, UriBool normalizeBreaks) { UriBool firstItem = URI_TRUE; int ampersandLen = 0; /* increased to 1 from second item on */ URI_CHAR * write = dest; /* Subtract terminator */ if (dest == NULL) { *charsRequired = 0; } else { maxChars--; } while (queryList != NULL) { const URI_CHAR * const key = queryList->key; const URI_CHAR * const value = queryList->value; const int worstCase = (normalizeBreaks == URI_TRUE ? 6 : 3); const int keyLen = (key == NULL) ? 0 : (int)URI_STRLEN(key); int keyRequiredChars; const int valueLen = (value == NULL) ? 0 : (int)URI_STRLEN(value); int valueRequiredChars; if ((keyLen >= INT_MAX / worstCase) || (valueLen >= INT_MAX / worstCase)) { return URI_ERROR_OUTPUT_TOO_LARGE; } keyRequiredChars = worstCase * keyLen; valueRequiredChars = worstCase * valueLen; if (dest == NULL) { (*charsRequired) += ampersandLen + keyRequiredChars + ((value == NULL) ? 0 : 1 + valueRequiredChars); if (firstItem == URI_TRUE) { ampersandLen = 1; firstItem = URI_FALSE; } } else { if ((write - dest) + ampersandLen + keyRequiredChars > maxChars) { return URI_ERROR_OUTPUT_TOO_LARGE; } /* Copy key */ if (firstItem == URI_TRUE) { ampersandLen = 1; firstItem = URI_FALSE; } else { write[0] = _UT('&'); write++; } write = URI_FUNC(EscapeEx)(key, key + keyLen, write, spaceToPlus, normalizeBreaks); if (value != NULL) { if ((write - dest) + 1 + valueRequiredChars > maxChars) { return URI_ERROR_OUTPUT_TOO_LARGE; } /* Copy value */ write[0] = _UT('='); write++; write = URI_FUNC(EscapeEx)(value, value + valueLen, write, spaceToPlus, normalizeBreaks); } } queryList = queryList->next; } if (dest != NULL) { write[0] = _UT('\0'); if (charsWritten != NULL) { *charsWritten = (int)(write - dest) + 1; /* .. for terminator */ } } return URI_SUCCESS; }
168,975
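The fix above rejects key and value lengths before worstCase * len can overflow an int. A minimal sketch of that pre-multiplication guard follows; it is deliberately conservative in the same way as the >= check in the fixed function, and the names are hypothetical.

#include <limits.h>

/* Returns the worst-case escaped-output bound, or -1 if len * factor could
 * overflow. The >= comparison is conservative, mirroring the fix above. */
static int escaped_bound(int len, int factor)
{
    if (len < 0 || factor <= 0)
        return -1;
    if (len >= INT_MAX / factor)
        return -1;
    return len * factor;
}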
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: BufCompressedFill (BufFilePtr f) { CompressedFile *file; register char_type *stackp, *de_stack; register char_type finchar; register code_int code, oldcode, incode; BufChar *buf, *bufend; file = (CompressedFile *) f->private; buf = f->buffer; bufend = buf + BUFFILESIZE; stackp = file->stackp; de_stack = file->de_stack; finchar = file->finchar; oldcode = file->oldcode; while (buf < bufend) { while (stackp > de_stack && buf < bufend) *buf++ = *--stackp; if (buf == bufend) break; if (oldcode == -1) break; code = getcode (file); if (code == -1) break; if ( (code == CLEAR) && file->block_compress ) { for ( code = 255; code >= 0; code-- ) file->tab_prefix[code] = 0; file->clear_flg = 1; file->free_ent = FIRST - 1; if ( (code = getcode (file)) == -1 ) /* O, untimely death! */ break; } incode = code; /* * Special case for KwKwK string. */ if ( code >= file->free_ent ) { *stackp++ = finchar; code = oldcode; } /* * Generate output characters in reverse order */ while ( code >= 256 ) { *stackp++ = file->tab_suffix[code]; code = file->tab_prefix[code]; } /* * Generate the new entry. */ if ( (code=file->free_ent) < file->maxmaxcode ) { file->tab_prefix[code] = (unsigned short)oldcode; file->tab_suffix[code] = finchar; file->free_ent = code+1; } /* * Remember previous code. */ oldcode = incode; } file->oldcode = oldcode; file->stackp = stackp; file->finchar = finchar; if (buf == f->buffer) { f->left = 0; return BUFFILEEOF; } f->bufp = f->buffer + 1; f->left = (buf - f->buffer) - 1; return f->buffer[0]; } Commit Message: CWE ID: CWE-119
BufCompressedFill (BufFilePtr f) { CompressedFile *file; register char_type *stackp, *de_stack; register char_type finchar; register code_int code, oldcode, incode; BufChar *buf, *bufend; file = (CompressedFile *) f->private; buf = f->buffer; bufend = buf + BUFFILESIZE; stackp = file->stackp; de_stack = file->de_stack; finchar = file->finchar; oldcode = file->oldcode; while (buf < bufend) { while (stackp > de_stack && buf < bufend) *buf++ = *--stackp; if (buf == bufend) break; if (oldcode == -1) break; code = getcode (file); if (code == -1) break; if ( (code == CLEAR) && file->block_compress ) { for ( code = 255; code >= 0; code-- ) file->tab_prefix[code] = 0; file->clear_flg = 1; file->free_ent = FIRST - 1; if ( (code = getcode (file)) == -1 ) /* O, untimely death! */ break; } incode = code; /* * Special case for KwKwK string. */ if ( code >= file->free_ent ) { *stackp++ = finchar; code = oldcode; } /* * Generate output characters in reverse order */ while ( code >= 256 ) { if (stackp - de_stack >= STACK_SIZE - 1) return BUFFILEEOF; *stackp++ = file->tab_suffix[code]; code = file->tab_prefix[code]; } /* * Generate the new entry. */ if ( (code=file->free_ent) < file->maxmaxcode ) { file->tab_prefix[code] = (unsigned short)oldcode; file->tab_suffix[code] = finchar; file->free_ent = code+1; } /* * Remember previous code. */ oldcode = incode; } file->oldcode = oldcode; file->stackp = stackp; file->finchar = finchar; if (buf == f->buffer) { f->left = 0; return BUFFILEEOF; } f->bufp = f->buffer + 1; f->left = (buf - f->buffer) - 1; return f->buffer[0]; }
164,651
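The fix above bounds the LZW output stack so that a corrupt prefix chain cannot push past de_stack plus STACK_SIZE. Here is a compact sketch of the same idea with hypothetical table sizes and names; it additionally rejects out-of-range codes so the sketch is memory-safe on its own.

#include <stddef.h>

#define TABLE_SIZE 4096
#define STACK_SIZE 8192

struct decoder {
    unsigned short prefix[TABLE_SIZE];
    unsigned char  suffix[TABLE_SIZE];
    unsigned char  stack[STACK_SIZE];
};

/* Expands one code onto d->stack; returns bytes pushed or -1 on bad input. */
static int expand_code(struct decoder *d, unsigned code)
{
    size_t sp = 0;

    while (code >= 256) {
        if (code >= TABLE_SIZE || sp >= STACK_SIZE - 1)
            return -1;                 /* corrupt chain: refuse to overrun */
        d->stack[sp++] = d->suffix[code];
        code = d->prefix[code];
    }
    d->stack[sp++] = (unsigned char)code;
    return (int)sp;
}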
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::SubmitDecode( const scoped_refptr<VP8Picture>& pic, const Vp8FrameHeader* frame_hdr, const scoped_refptr<VP8Picture>& last_frame, const scoped_refptr<VP8Picture>& golden_frame, const scoped_refptr<VP8Picture>& alt_frame) { VAIQMatrixBufferVP8 iq_matrix_buf; memset(&iq_matrix_buf, 0, sizeof(VAIQMatrixBufferVP8)); const Vp8SegmentationHeader& sgmnt_hdr = frame_hdr->segmentation_hdr; const Vp8QuantizationHeader& quant_hdr = frame_hdr->quantization_hdr; static_assert(arraysize(iq_matrix_buf.quantization_index) == kMaxMBSegments, "incorrect quantization matrix size"); for (size_t i = 0; i < kMaxMBSegments; ++i) { int q = quant_hdr.y_ac_qi; if (sgmnt_hdr.segmentation_enabled) { if (sgmnt_hdr.segment_feature_mode == Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE) q = sgmnt_hdr.quantizer_update_value[i]; else q += sgmnt_hdr.quantizer_update_value[i]; } #define CLAMP_Q(q) std::min(std::max(q, 0), 127) static_assert(arraysize(iq_matrix_buf.quantization_index[i]) == 6, "incorrect quantization matrix size"); iq_matrix_buf.quantization_index[i][0] = CLAMP_Q(q); iq_matrix_buf.quantization_index[i][1] = CLAMP_Q(q + quant_hdr.y_dc_delta); iq_matrix_buf.quantization_index[i][2] = CLAMP_Q(q + quant_hdr.y2_dc_delta); iq_matrix_buf.quantization_index[i][3] = CLAMP_Q(q + quant_hdr.y2_ac_delta); iq_matrix_buf.quantization_index[i][4] = CLAMP_Q(q + quant_hdr.uv_dc_delta); iq_matrix_buf.quantization_index[i][5] = CLAMP_Q(q + quant_hdr.uv_ac_delta); #undef CLAMP_Q } if (!vaapi_wrapper_->SubmitBuffer( VAIQMatrixBufferType, sizeof(VAIQMatrixBufferVP8), &iq_matrix_buf)) return false; VAProbabilityDataBufferVP8 prob_buf; memset(&prob_buf, 0, sizeof(VAProbabilityDataBufferVP8)); const Vp8EntropyHeader& entr_hdr = frame_hdr->entropy_hdr; ARRAY_MEMCPY_CHECKED(prob_buf.dct_coeff_probs, entr_hdr.coeff_probs); if (!vaapi_wrapper_->SubmitBuffer(VAProbabilityBufferType, sizeof(VAProbabilityDataBufferVP8), &prob_buf)) return false; VAPictureParameterBufferVP8 pic_param; memset(&pic_param, 0, sizeof(VAPictureParameterBufferVP8)); pic_param.frame_width = frame_hdr->width; pic_param.frame_height = frame_hdr->height; if (last_frame) { scoped_refptr<VaapiDecodeSurface> last_frame_surface = VP8PictureToVaapiDecodeSurface(last_frame); pic_param.last_ref_frame = last_frame_surface->va_surface()->id(); } else { pic_param.last_ref_frame = VA_INVALID_SURFACE; } if (golden_frame) { scoped_refptr<VaapiDecodeSurface> golden_frame_surface = VP8PictureToVaapiDecodeSurface(golden_frame); pic_param.golden_ref_frame = golden_frame_surface->va_surface()->id(); } else { pic_param.golden_ref_frame = VA_INVALID_SURFACE; } if (alt_frame) { scoped_refptr<VaapiDecodeSurface> alt_frame_surface = VP8PictureToVaapiDecodeSurface(alt_frame); pic_param.alt_ref_frame = alt_frame_surface->va_surface()->id(); } else { pic_param.alt_ref_frame = VA_INVALID_SURFACE; } pic_param.out_of_loop_frame = VA_INVALID_SURFACE; const Vp8LoopFilterHeader& lf_hdr = frame_hdr->loopfilter_hdr; #define FHDR_TO_PP_PF(a, b) pic_param.pic_fields.bits.a = (b) FHDR_TO_PP_PF(key_frame, frame_hdr->IsKeyframe() ? 
0 : 1); FHDR_TO_PP_PF(version, frame_hdr->version); FHDR_TO_PP_PF(segmentation_enabled, sgmnt_hdr.segmentation_enabled); FHDR_TO_PP_PF(update_mb_segmentation_map, sgmnt_hdr.update_mb_segmentation_map); FHDR_TO_PP_PF(update_segment_feature_data, sgmnt_hdr.update_segment_feature_data); FHDR_TO_PP_PF(filter_type, lf_hdr.type); FHDR_TO_PP_PF(sharpness_level, lf_hdr.sharpness_level); FHDR_TO_PP_PF(loop_filter_adj_enable, lf_hdr.loop_filter_adj_enable); FHDR_TO_PP_PF(mode_ref_lf_delta_update, lf_hdr.mode_ref_lf_delta_update); FHDR_TO_PP_PF(sign_bias_golden, frame_hdr->sign_bias_golden); FHDR_TO_PP_PF(sign_bias_alternate, frame_hdr->sign_bias_alternate); FHDR_TO_PP_PF(mb_no_coeff_skip, frame_hdr->mb_no_skip_coeff); FHDR_TO_PP_PF(loop_filter_disable, lf_hdr.level == 0); #undef FHDR_TO_PP_PF ARRAY_MEMCPY_CHECKED(pic_param.mb_segment_tree_probs, sgmnt_hdr.segment_prob); static_assert(arraysize(sgmnt_hdr.lf_update_value) == arraysize(pic_param.loop_filter_level), "loop filter level arrays mismatch"); for (size_t i = 0; i < arraysize(sgmnt_hdr.lf_update_value); ++i) { int lf_level = lf_hdr.level; if (sgmnt_hdr.segmentation_enabled) { if (sgmnt_hdr.segment_feature_mode == Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE) lf_level = sgmnt_hdr.lf_update_value[i]; else lf_level += sgmnt_hdr.lf_update_value[i]; } lf_level = std::min(std::max(lf_level, 0), 63); pic_param.loop_filter_level[i] = lf_level; } static_assert( arraysize(lf_hdr.ref_frame_delta) == arraysize(pic_param.loop_filter_deltas_ref_frame) && arraysize(lf_hdr.mb_mode_delta) == arraysize(pic_param.loop_filter_deltas_mode) && arraysize(lf_hdr.ref_frame_delta) == arraysize(lf_hdr.mb_mode_delta), "loop filter deltas arrays size mismatch"); for (size_t i = 0; i < arraysize(lf_hdr.ref_frame_delta); ++i) { pic_param.loop_filter_deltas_ref_frame[i] = lf_hdr.ref_frame_delta[i]; pic_param.loop_filter_deltas_mode[i] = lf_hdr.mb_mode_delta[i]; } #define FHDR_TO_PP(a) pic_param.a = frame_hdr->a FHDR_TO_PP(prob_skip_false); FHDR_TO_PP(prob_intra); FHDR_TO_PP(prob_last); FHDR_TO_PP(prob_gf); #undef FHDR_TO_PP ARRAY_MEMCPY_CHECKED(pic_param.y_mode_probs, entr_hdr.y_mode_probs); ARRAY_MEMCPY_CHECKED(pic_param.uv_mode_probs, entr_hdr.uv_mode_probs); ARRAY_MEMCPY_CHECKED(pic_param.mv_probs, entr_hdr.mv_probs); pic_param.bool_coder_ctx.range = frame_hdr->bool_dec_range; pic_param.bool_coder_ctx.value = frame_hdr->bool_dec_value; pic_param.bool_coder_ctx.count = frame_hdr->bool_dec_count; if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType, sizeof(pic_param), &pic_param)) return false; VASliceParameterBufferVP8 slice_param; memset(&slice_param, 0, sizeof(slice_param)); slice_param.slice_data_size = frame_hdr->frame_size; slice_param.slice_data_offset = frame_hdr->first_part_offset; slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL; slice_param.macroblock_offset = frame_hdr->macroblock_bit_offset; slice_param.num_of_partitions = frame_hdr->num_of_dct_partitions + 1; slice_param.partition_size[0] = frame_hdr->first_part_size - ((frame_hdr->macroblock_bit_offset + 7) / 8); for (size_t i = 0; i < frame_hdr->num_of_dct_partitions; ++i) slice_param.partition_size[i + 1] = frame_hdr->dct_partition_sizes[i]; if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType, sizeof(VASliceParameterBufferVP8), &slice_param)) return false; void* non_const_ptr = const_cast<uint8_t*>(frame_hdr->data); if (!vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType, frame_hdr->frame_size, non_const_ptr)) return false; scoped_refptr<VaapiDecodeSurface> dec_surface = 
VP8PictureToVaapiDecodeSurface(pic); return vaapi_dec_->DecodeSurface(dec_surface); } Commit Message: vaapi vda: Delete owned objects on worker thread in Cleanup() This CL adds a SEQUENCE_CHECKER to Vaapi*Accelerator classes, and posts the destruction of those objects to the appropriate thread on Cleanup(). Also makes {H264,VP8,VP9}Picture RefCountedThreadSafe, see miu@ comment in https://chromium-review.googlesource.com/c/chromium/src/+/794091#message-a64bed985cfaf8a19499a517bb110a7ce581dc0f TEST=play back VP9/VP8/H264 w/ simplechrome on soraka, Release build unstripped, let video play for a few seconds then navigate back; no crashes. Unittests as before: video_decode_accelerator_unittest --test_video_data=test-25fps.vp9:320:240:250:250:35:150:12 video_decode_accelerator_unittest --test_video_data=test-25fps.vp8:320:240:250:250:35:150:11 video_decode_accelerator_unittest --test_video_data=test-25fps.h264:320:240:250:258:35:150:1 Bug: 789160 Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel Change-Id: I7d96aaf89c92bf46f00c8b8b36798e057a842ed2 Reviewed-on: https://chromium-review.googlesource.com/794091 Reviewed-by: Pawel Osciak <[email protected]> Commit-Queue: Miguel Casas <[email protected]> Cr-Commit-Position: refs/heads/master@{#523372} CWE ID: CWE-362
bool VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::SubmitDecode( const scoped_refptr<VP8Picture>& pic, const Vp8FrameHeader* frame_hdr, const scoped_refptr<VP8Picture>& last_frame, const scoped_refptr<VP8Picture>& golden_frame, const scoped_refptr<VP8Picture>& alt_frame) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); VAIQMatrixBufferVP8 iq_matrix_buf; memset(&iq_matrix_buf, 0, sizeof(VAIQMatrixBufferVP8)); const Vp8SegmentationHeader& sgmnt_hdr = frame_hdr->segmentation_hdr; const Vp8QuantizationHeader& quant_hdr = frame_hdr->quantization_hdr; static_assert(arraysize(iq_matrix_buf.quantization_index) == kMaxMBSegments, "incorrect quantization matrix size"); for (size_t i = 0; i < kMaxMBSegments; ++i) { int q = quant_hdr.y_ac_qi; if (sgmnt_hdr.segmentation_enabled) { if (sgmnt_hdr.segment_feature_mode == Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE) q = sgmnt_hdr.quantizer_update_value[i]; else q += sgmnt_hdr.quantizer_update_value[i]; } #define CLAMP_Q(q) std::min(std::max(q, 0), 127) static_assert(arraysize(iq_matrix_buf.quantization_index[i]) == 6, "incorrect quantization matrix size"); iq_matrix_buf.quantization_index[i][0] = CLAMP_Q(q); iq_matrix_buf.quantization_index[i][1] = CLAMP_Q(q + quant_hdr.y_dc_delta); iq_matrix_buf.quantization_index[i][2] = CLAMP_Q(q + quant_hdr.y2_dc_delta); iq_matrix_buf.quantization_index[i][3] = CLAMP_Q(q + quant_hdr.y2_ac_delta); iq_matrix_buf.quantization_index[i][4] = CLAMP_Q(q + quant_hdr.uv_dc_delta); iq_matrix_buf.quantization_index[i][5] = CLAMP_Q(q + quant_hdr.uv_ac_delta); #undef CLAMP_Q } if (!vaapi_wrapper_->SubmitBuffer( VAIQMatrixBufferType, sizeof(VAIQMatrixBufferVP8), &iq_matrix_buf)) return false; VAProbabilityDataBufferVP8 prob_buf; memset(&prob_buf, 0, sizeof(VAProbabilityDataBufferVP8)); const Vp8EntropyHeader& entr_hdr = frame_hdr->entropy_hdr; ARRAY_MEMCPY_CHECKED(prob_buf.dct_coeff_probs, entr_hdr.coeff_probs); if (!vaapi_wrapper_->SubmitBuffer(VAProbabilityBufferType, sizeof(VAProbabilityDataBufferVP8), &prob_buf)) return false; VAPictureParameterBufferVP8 pic_param; memset(&pic_param, 0, sizeof(VAPictureParameterBufferVP8)); pic_param.frame_width = frame_hdr->width; pic_param.frame_height = frame_hdr->height; if (last_frame) { scoped_refptr<VaapiDecodeSurface> last_frame_surface = VP8PictureToVaapiDecodeSurface(last_frame); pic_param.last_ref_frame = last_frame_surface->va_surface()->id(); } else { pic_param.last_ref_frame = VA_INVALID_SURFACE; } if (golden_frame) { scoped_refptr<VaapiDecodeSurface> golden_frame_surface = VP8PictureToVaapiDecodeSurface(golden_frame); pic_param.golden_ref_frame = golden_frame_surface->va_surface()->id(); } else { pic_param.golden_ref_frame = VA_INVALID_SURFACE; } if (alt_frame) { scoped_refptr<VaapiDecodeSurface> alt_frame_surface = VP8PictureToVaapiDecodeSurface(alt_frame); pic_param.alt_ref_frame = alt_frame_surface->va_surface()->id(); } else { pic_param.alt_ref_frame = VA_INVALID_SURFACE; } pic_param.out_of_loop_frame = VA_INVALID_SURFACE; const Vp8LoopFilterHeader& lf_hdr = frame_hdr->loopfilter_hdr; #define FHDR_TO_PP_PF(a, b) pic_param.pic_fields.bits.a = (b) FHDR_TO_PP_PF(key_frame, frame_hdr->IsKeyframe() ? 
0 : 1); FHDR_TO_PP_PF(version, frame_hdr->version); FHDR_TO_PP_PF(segmentation_enabled, sgmnt_hdr.segmentation_enabled); FHDR_TO_PP_PF(update_mb_segmentation_map, sgmnt_hdr.update_mb_segmentation_map); FHDR_TO_PP_PF(update_segment_feature_data, sgmnt_hdr.update_segment_feature_data); FHDR_TO_PP_PF(filter_type, lf_hdr.type); FHDR_TO_PP_PF(sharpness_level, lf_hdr.sharpness_level); FHDR_TO_PP_PF(loop_filter_adj_enable, lf_hdr.loop_filter_adj_enable); FHDR_TO_PP_PF(mode_ref_lf_delta_update, lf_hdr.mode_ref_lf_delta_update); FHDR_TO_PP_PF(sign_bias_golden, frame_hdr->sign_bias_golden); FHDR_TO_PP_PF(sign_bias_alternate, frame_hdr->sign_bias_alternate); FHDR_TO_PP_PF(mb_no_coeff_skip, frame_hdr->mb_no_skip_coeff); FHDR_TO_PP_PF(loop_filter_disable, lf_hdr.level == 0); #undef FHDR_TO_PP_PF ARRAY_MEMCPY_CHECKED(pic_param.mb_segment_tree_probs, sgmnt_hdr.segment_prob); static_assert(arraysize(sgmnt_hdr.lf_update_value) == arraysize(pic_param.loop_filter_level), "loop filter level arrays mismatch"); for (size_t i = 0; i < arraysize(sgmnt_hdr.lf_update_value); ++i) { int lf_level = lf_hdr.level; if (sgmnt_hdr.segmentation_enabled) { if (sgmnt_hdr.segment_feature_mode == Vp8SegmentationHeader::FEATURE_MODE_ABSOLUTE) lf_level = sgmnt_hdr.lf_update_value[i]; else lf_level += sgmnt_hdr.lf_update_value[i]; } lf_level = std::min(std::max(lf_level, 0), 63); pic_param.loop_filter_level[i] = lf_level; } static_assert( arraysize(lf_hdr.ref_frame_delta) == arraysize(pic_param.loop_filter_deltas_ref_frame) && arraysize(lf_hdr.mb_mode_delta) == arraysize(pic_param.loop_filter_deltas_mode) && arraysize(lf_hdr.ref_frame_delta) == arraysize(lf_hdr.mb_mode_delta), "loop filter deltas arrays size mismatch"); for (size_t i = 0; i < arraysize(lf_hdr.ref_frame_delta); ++i) { pic_param.loop_filter_deltas_ref_frame[i] = lf_hdr.ref_frame_delta[i]; pic_param.loop_filter_deltas_mode[i] = lf_hdr.mb_mode_delta[i]; } #define FHDR_TO_PP(a) pic_param.a = frame_hdr->a FHDR_TO_PP(prob_skip_false); FHDR_TO_PP(prob_intra); FHDR_TO_PP(prob_last); FHDR_TO_PP(prob_gf); #undef FHDR_TO_PP ARRAY_MEMCPY_CHECKED(pic_param.y_mode_probs, entr_hdr.y_mode_probs); ARRAY_MEMCPY_CHECKED(pic_param.uv_mode_probs, entr_hdr.uv_mode_probs); ARRAY_MEMCPY_CHECKED(pic_param.mv_probs, entr_hdr.mv_probs); pic_param.bool_coder_ctx.range = frame_hdr->bool_dec_range; pic_param.bool_coder_ctx.value = frame_hdr->bool_dec_value; pic_param.bool_coder_ctx.count = frame_hdr->bool_dec_count; if (!vaapi_wrapper_->SubmitBuffer(VAPictureParameterBufferType, sizeof(pic_param), &pic_param)) return false; VASliceParameterBufferVP8 slice_param; memset(&slice_param, 0, sizeof(slice_param)); slice_param.slice_data_size = frame_hdr->frame_size; slice_param.slice_data_offset = frame_hdr->first_part_offset; slice_param.slice_data_flag = VA_SLICE_DATA_FLAG_ALL; slice_param.macroblock_offset = frame_hdr->macroblock_bit_offset; slice_param.num_of_partitions = frame_hdr->num_of_dct_partitions + 1; slice_param.partition_size[0] = frame_hdr->first_part_size - ((frame_hdr->macroblock_bit_offset + 7) / 8); for (size_t i = 0; i < frame_hdr->num_of_dct_partitions; ++i) slice_param.partition_size[i + 1] = frame_hdr->dct_partition_sizes[i]; if (!vaapi_wrapper_->SubmitBuffer(VASliceParameterBufferType, sizeof(VASliceParameterBufferVP8), &slice_param)) return false; void* non_const_ptr = const_cast<uint8_t*>(frame_hdr->data); if (!vaapi_wrapper_->SubmitBuffer(VASliceDataBufferType, frame_hdr->frame_size, non_const_ptr)) return false; scoped_refptr<VaapiDecodeSurface> dec_surface = 
VP8PictureToVaapiDecodeSurface(pic); return vaapi_dec_->DecodeSurface(dec_surface); }
172,809
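The fix above adds DCHECK_CALLED_ON_VALID_SEQUENCE so the accelerator's members are only touched from the sequence that owns them. The following plain-C analogue uses pthreads to show the record-the-owner-and-assert pattern; the names are hypothetical and this is only a sketch, not Chromium's sequence-checker machinery.

#include <assert.h>
#include <pthread.h>

struct accel {
    pthread_t owner;          /* thread the object is bound to */
    int initialized;
};

/* Bind the object to the calling thread (e.g. at creation time). */
static void accel_bind(struct accel *a)
{
    a->owner = pthread_self();
    a->initialized = 1;
}

/* Every entry point that touches non-thread-safe state asserts affinity. */
static void accel_submit(struct accel *a)
{
    assert(a->initialized && pthread_equal(a->owner, pthread_self()));
    /* ... touch members that are not safe to use cross-thread ... */
}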