Dataset schema:
  instruction        string (1 unique value)
  input              string (length 90 to 139k)
  output             string (length 16 to 138k)
  __index_level_0__  int64 (values 165k to 175k)
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void InputHandlerProxy::DispatchSingleInputEvent( std::unique_ptr<EventWithCallback> event_with_callback, const base::TimeTicks now) { const ui::LatencyInfo& original_latency_info = event_with_callback->latency_info(); ui::LatencyInfo monitored_latency_info = original_latency_info; std::unique_ptr<cc::SwapPromiseMonitor> latency_info_swap_promise_monitor = input_handler_->CreateLatencyInfoSwapPromiseMonitor( &monitored_latency_info); current_overscroll_params_.reset(); InputHandlerProxy::EventDisposition disposition = RouteToTypeSpecificHandler( event_with_callback->event(), original_latency_info); blink::WebGestureEvent::Type type = event_with_callback->event().GetType(); switch (type) { case blink::WebGestureEvent::kGestureScrollBegin: is_first_gesture_scroll_update_ = true; FALLTHROUGH; case blink::WebGestureEvent::kGesturePinchBegin: case blink::WebGestureEvent::kGestureScrollUpdate: case blink::WebGestureEvent::kGesturePinchUpdate: has_ongoing_compositor_scroll_or_pinch_ = disposition == DID_HANDLE; break; case blink::WebGestureEvent::kGestureScrollEnd: case blink::WebGestureEvent::kGesturePinchEnd: has_ongoing_compositor_scroll_or_pinch_ = false; break; default: break; } switch (type) { case blink::WebGestureEvent::kGestureScrollBegin: momentum_scroll_jank_tracker_ = std::make_unique<MomentumScrollJankTracker>(); break; case blink::WebGestureEvent::kGestureScrollUpdate: if (momentum_scroll_jank_tracker_) { momentum_scroll_jank_tracker_->OnDispatchedInputEvent( event_with_callback.get(), now); } break; case blink::WebGestureEvent::kGestureScrollEnd: momentum_scroll_jank_tracker_.reset(); break; default: break; } event_with_callback->RunCallbacks(disposition, monitored_latency_info, std::move(current_overscroll_params_)); } Commit Message: Revert "Add explicit flag for compositor scrollbar injected gestures" This reverts commit d9a56afcbdf9850bc39bb3edb56d07d11a1eb2b2. Reason for revert: Findit (https://goo.gl/kROfz5) identified CL at revision 669086 as the culprit for flakes in the build cycles as shown on: https://analysis.chromium.org/p/chromium/flake-portal/analysis/culprit?key=ag9zfmZpbmRpdC1mb3ItbWVyQwsSDEZsYWtlQ3VscHJpdCIxY2hyb21pdW0vZDlhNTZhZmNiZGY5ODUwYmMzOWJiM2VkYjU2ZDA3ZDExYTFlYjJiMgw Sample Failed Build: https://ci.chromium.org/buildbot/chromium.chromiumos/linux-chromeos-rel/25818 Sample Failed Step: content_browsertests on Ubuntu-16.04 Sample Flaky Test: ScrollLatencyScrollbarBrowserTest.ScrollbarThumbDragLatency Original change's description: > Add explicit flag for compositor scrollbar injected gestures > > The original change to enable scrollbar latency for the composited > scrollbars incorrectly used an existing member to try and determine > whether a GestureScrollUpdate was the first one in an injected sequence > or not. is_first_gesture_scroll_update_ was incorrect because it is only > updated when input is actually dispatched to InputHandlerProxy, and the > flag is cleared for all GSUs before the location where it was being > read. > > This bug was missed because of incorrect tests. The > VerifyRecordedSamplesForHistogram method doesn't actually assert or > expect anything - the return value must be inspected. 
> > As part of fixing up the tests, I made a few other changes to get them > passing consistently across all platforms: > - turn on main thread scrollbar injection feature (in case it's ever > turned off we don't want the tests to start failing) > - enable mock scrollbars > - disable smooth scrolling > - don't run scrollbar tests on Android > > The composited scrollbar button test is disabled due to a bug in how > the mock theme reports its button sizes, which throws off the region > detection in ScrollbarLayerImplBase::IdentifyScrollbarPart (filed > crbug.com/974063 for this issue). > > Change-Id: Ie1a762a5f6ecc264d22f0256db68f141fc76b950 > > Bug: 954007 > Change-Id: Ib258e08e083e79da90ba2e4e4216e4879cf00cf7 > Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1652741 > Commit-Queue: Daniel Libby <[email protected]> > Reviewed-by: David Bokan <[email protected]> > Cr-Commit-Position: refs/heads/master@{#669086} Change-Id: Icc743e48fa740fe27f0cb0cfa21b209a696f518c No-Presubmit: true No-Tree-Checks: true No-Try: true Bug: 954007 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1660114 Cr-Commit-Position: refs/heads/master@{#669150} CWE ID: CWE-281
void InputHandlerProxy::DispatchSingleInputEvent( std::unique_ptr<EventWithCallback> event_with_callback, const base::TimeTicks now) { const ui::LatencyInfo& original_latency_info = event_with_callback->latency_info(); ui::LatencyInfo monitored_latency_info = original_latency_info; std::unique_ptr<cc::SwapPromiseMonitor> latency_info_swap_promise_monitor = input_handler_->CreateLatencyInfoSwapPromiseMonitor( &monitored_latency_info); current_overscroll_params_.reset(); blink::WebGestureEvent::Type type = event_with_callback->event().GetType(); if (type == blink::WebGestureEvent::kGestureScrollUpdate) { EnsureScrollUpdateLatencyComponent( &monitored_latency_info, event_with_callback->event().TimeStamp()); } InputHandlerProxy::EventDisposition disposition = RouteToTypeSpecificHandler( event_with_callback->event(), original_latency_info); switch (type) { case blink::WebGestureEvent::kGestureScrollBegin: is_first_gesture_scroll_update_ = true; FALLTHROUGH; case blink::WebGestureEvent::kGesturePinchBegin: case blink::WebGestureEvent::kGestureScrollUpdate: case blink::WebGestureEvent::kGesturePinchUpdate: has_ongoing_compositor_scroll_or_pinch_ = disposition == DID_HANDLE; break; case blink::WebGestureEvent::kGestureScrollEnd: case blink::WebGestureEvent::kGesturePinchEnd: has_ongoing_compositor_scroll_or_pinch_ = false; break; default: break; } switch (type) { case blink::WebGestureEvent::kGestureScrollBegin: momentum_scroll_jank_tracker_ = std::make_unique<MomentumScrollJankTracker>(); break; case blink::WebGestureEvent::kGestureScrollUpdate: if (momentum_scroll_jank_tracker_) { momentum_scroll_jank_tracker_->OnDispatchedInputEvent( event_with_callback.get(), now); } break; case blink::WebGestureEvent::kGestureScrollEnd: momentum_scroll_jank_tracker_.reset(); break; default: break; } event_with_callback->RunCallbacks(disposition, monitored_latency_info, std::move(current_overscroll_params_)); }
172,431
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: long Segment::ParseNext(const Cluster* pCurr, const Cluster*& pResult, long long& pos, long& len) { assert(pCurr); assert(!pCurr->EOS()); assert(m_clusters); pResult = 0; if (pCurr->m_index >= 0) { // loaded (not merely preloaded) assert(m_clusters[pCurr->m_index] == pCurr); const long next_idx = pCurr->m_index + 1; if (next_idx < m_clusterCount) { pResult = m_clusters[next_idx]; return 0; // success } const long result = LoadCluster(pos, len); if (result < 0) // error or underflow return result; if (result > 0) // no more clusters { return 1; } pResult = GetLast(); return 0; // success } assert(m_pos > 0); long long total, avail; long status = m_pReader->Length(&total, &avail); if (status < 0) // error return status; assert((total < 0) || (avail <= total)); const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size; pos = pCurr->m_element_start; if (pCurr->m_element_size >= 0) pos += pCurr->m_element_size; else { if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long id = ReadUInt(m_pReader, pos, len); if (id != 0x0F43B675) // weird: not Cluster ID return -1; pos += len; // consume ID if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume size field const long long unknown_size = (1LL << (7 * len)) - 1; if (size == unknown_size) // TODO: should never happen return E_FILE_FORMAT_INVALID; // TODO: resolve this if ((segment_stop >= 0) && ((pos + size) > segment_stop)) return E_FILE_FORMAT_INVALID; pos += size; // consume payload (that is, the current cluster) assert((segment_stop < 0) || (pos <= segment_stop)); } for (;;) { const long status = DoParseNext(pResult, pos, len); if (status <= 1) return status; } } Commit Message: external/libvpx/libwebm: Update snapshot Update libwebm snapshot. This update contains security fixes from upstream. Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b BUG=23167726 Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207 (cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a) CWE ID: CWE-20
long Segment::ParseNext(const Cluster* pCurr, const Cluster*& pResult, long long& pos, long& len) { assert(pCurr); assert(!pCurr->EOS()); assert(m_clusters); pResult = 0; if (pCurr->m_index >= 0) { // loaded (not merely preloaded) assert(m_clusters[pCurr->m_index] == pCurr); const long next_idx = pCurr->m_index + 1; if (next_idx < m_clusterCount) { pResult = m_clusters[next_idx]; return 0; // success } const long result = LoadCluster(pos, len); if (result < 0) // error or underflow return result; if (result > 0) // no more clusters { return 1; } pResult = GetLast(); return 0; // success } assert(m_pos > 0); long long total, avail; long status = m_pReader->Length(&total, &avail); if (status < 0) // error return status; assert((total < 0) || (avail <= total)); const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size; pos = pCurr->m_element_start; if (pCurr->m_element_size >= 0) pos += pCurr->m_element_size; else { if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } long long result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long id = ReadUInt(m_pReader, pos, len); if (id != 0x0F43B675) // weird: not Cluster ID return -1; pos += len; // consume ID if ((pos + 1) > avail) { len = 1; return E_BUFFER_NOT_FULL; } result = GetUIntLength(m_pReader, pos, len); if (result < 0) // error return static_cast<long>(result); if (result > 0) // weird return E_BUFFER_NOT_FULL; if ((segment_stop >= 0) && ((pos + len) > segment_stop)) return E_FILE_FORMAT_INVALID; if ((pos + len) > avail) return E_BUFFER_NOT_FULL; const long long size = ReadUInt(m_pReader, pos, len); if (size < 0) // error return static_cast<long>(size); pos += len; // consume size field const long long unknown_size = (1LL << (7 * len)) - 1; if (size == unknown_size) // TODO: should never happen return E_FILE_FORMAT_INVALID; // TODO: resolve this if ((segment_stop >= 0) && ((pos + size) > segment_stop)) return E_FILE_FORMAT_INVALID; pos += size; // consume payload (that is, the current cluster) if (segment_stop >= 0 && pos > segment_stop) return E_FILE_FORMAT_INVALID; } for (;;) { const long status = DoParseNext(pResult, pos, len); if (status <= 1) return status; } }
173,857
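The fixed ParseNext above replaces an assert on the segment boundary with an explicit E_FILE_FORMAT_INVALID return. Assertions compile out in release builds, so attacker-controlled file offsets have to be rejected at runtime. Below is a minimal sketch of that pattern, using an illustrative error constant rather than libwebm's own definitions.

```cpp
#include <cstdint>

// Illustrative error code, mirroring the style of the row above.
constexpr long kFileFormatInvalid = -2;

// Validate that a parser position stays inside an optional segment boundary.
// segment_stop < 0 means "no known boundary" (unknown-size segment).
long CheckWithinSegment(int64_t pos, int64_t segment_stop) {
  // An assert here would vanish under NDEBUG, so a malformed file could
  // silently walk past the segment end. Check explicitly instead.
  if (segment_stop >= 0 && pos > segment_stop)
    return kFileFormatInvalid;
  return 0;  // success
}
```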
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: jbig2_sd_cat(Jbig2Ctx *ctx, int n_dicts, Jbig2SymbolDict **dicts) { int i, j, k, symbols; Jbig2SymbolDict *new = NULL; /* count the imported symbols and allocate a new array */ symbols = 0; for (i = 0; i < n_dicts; i++) symbols += dicts[i]->n_symbols; /* fill a new array with cloned glyph pointers */ new = jbig2_sd_new(ctx, symbols); if (new != NULL) { k = 0; for (i = 0; i < n_dicts; i++) for (j = 0; j < dicts[i]->n_symbols; j++) new->glyphs[k++] = jbig2_image_clone(ctx, dicts[i]->glyphs[j]); } else { jbig2_error(ctx, JBIG2_SEVERITY_WARNING, -1, "failed to allocate new symbol dictionary"); } return new; } Commit Message: CWE ID: CWE-119
jbig2_sd_cat(Jbig2Ctx *ctx, int n_dicts, Jbig2SymbolDict **dicts) jbig2_sd_cat(Jbig2Ctx *ctx, uint32_t n_dicts, Jbig2SymbolDict **dicts) { uint32_t i, j, k, symbols; Jbig2SymbolDict *new_dict = NULL; /* count the imported symbols and allocate a new array */ symbols = 0; for (i = 0; i < n_dicts; i++) symbols += dicts[i]->n_symbols; /* fill a new array with cloned glyph pointers */ new_dict = jbig2_sd_new(ctx, symbols); if (new_dict != NULL) { k = 0; for (i = 0; i < n_dicts; i++) for (j = 0; j < dicts[i]->n_symbols; j++) new_dict->glyphs[k++] = jbig2_image_clone(ctx, dicts[i]->glyphs[j]); } else { jbig2_error(ctx, JBIG2_SEVERITY_WARNING, -1, "failed to allocate new symbol dictionary"); } return new_dict; }
165,499
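The jbig2dec fix above widens n_dicts and the loop indices from int to uint32_t (and renames `new`, a C++ keyword). A related hazard is the summed symbol count itself, which feeds an allocation size. The helper below is hypothetical, not part of jbig2dec, and shows an explicit wraparound check before the total is used.

```cpp
#include <cstdint>
#include <limits>
#include <vector>

// `counts` stands in for dicts[i]->n_symbols across all input dictionaries.
bool TotalSymbols(const std::vector<uint32_t>& counts, uint32_t* total_out) {
  uint32_t total = 0;
  for (uint32_t n : counts) {
    // A signed accumulator could overflow into undefined behaviour; with
    // unsigned arithmetic the wraparound can be detected before it corrupts
    // the size passed to the glyph-array allocation.
    if (n > std::numeric_limits<uint32_t>::max() - total)
      return false;  // would overflow
    total += n;
  }
  *total_out = total;
  return true;
}
```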
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { if (size > 512) return 0; net::ProxyBypassRules rules; std::string input(data, data + size); rules.ParseFromString(input); rules.ParseFromStringUsingSuffixMatching(input); return 0; } Commit Message: Implicitly bypass localhost when proxying requests. This aligns Chrome's behavior with the Windows and macOS proxy resolvers (but not Firefox). Concretely: * localhost names (as determined by net::IsLocalhost) now implicitly bypass the proxy * link-local IP addresses implicitly bypass the proxy The implicit rules are handled by ProxyBypassRules, and it is possible to override them when manually configuring proxy settings (but not when using PAC or auto-detect). This change also adds support for the "<-loopback>" proxy bypass rule, with similar semantics as it has on Windows (removes the implicit bypass rules for localhost and link-local). The compatibility risk of this change should be low as proxying through localhost was not universally supported. It is however an idiom used in testing (a number of our own tests had such a dependency). Impacted users can use the "<-loopback>" bypass rule as a workaround. Bug: 413511, 899126, 901896 Change-Id: I263ca21ef9f12d4759a20cb4751dc3261bda6ac0 Reviewed-on: https://chromium-review.googlesource.com/c/1303626 Commit-Queue: Eric Roman <[email protected]> Reviewed-by: Dominick Ng <[email protected]> Reviewed-by: Tarun Bansal <[email protected]> Reviewed-by: Matt Menke <[email protected]> Reviewed-by: Sami Kyöstilä <[email protected]> Cr-Commit-Position: refs/heads/master@{#606112} CWE ID: CWE-20
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { if (size > 512) return 0; net::ProxyBypassRules rules; std::string input(data, data + size); const net::ProxyBypassRules::ParseFormat kFormats[] = { net::ProxyBypassRules::ParseFormat::kDefault, net::ProxyBypassRules::ParseFormat::kHostnameSuffixMatching, }; for (auto format : kFormats) rules.ParseFromString(input, format); return 0; }
172,645
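The Chromium change above makes localhost and link-local destinations bypass the proxy implicitly. The sketch below is illustrative only (it is not net::IsLocalhost or ProxyBypassRules) and shows the general shape of such a check on a host string.

```cpp
#include <string>

// Illustrative-only check: decide whether a destination should implicitly
// bypass an explicit proxy configuration.
bool ImplicitlyBypassesProxy(const std::string& host) {
  // localhost names
  if (host == "localhost" || host == "localhost.")
    return true;
  // IPv4 and IPv6 loopback literals
  if (host.rfind("127.", 0) == 0 || host == "::1" || host == "[::1]")
    return true;
  // IPv4 link-local (169.254.0.0/16); a real implementation would parse the
  // address rather than prefix-match the string.
  if (host.rfind("169.254.", 0) == 0)
    return true;
  return false;
}
```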
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void __intel_pmu_pebs_event(struct perf_event *event, struct pt_regs *iregs, void *__pebs) { /* * We cast to pebs_record_core since that is a subset of * both formats and we don't use the other fields in this * routine. */ struct pebs_record_core *pebs = __pebs; struct perf_sample_data data; struct pt_regs regs; if (!intel_pmu_save_and_restart(event)) return; perf_sample_data_init(&data, 0); data.period = event->hw.last_period; /* * We use the interrupt regs as a base because the PEBS record * does not contain a full regs set, specifically it seems to * lack segment descriptors, which get used by things like * user_mode(). * * In the simple case fix up only the IP and BP,SP regs, for * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. * A possible PERF_SAMPLE_REGS will have to transfer all regs. */ regs = *iregs; regs.ip = pebs->ip; regs.bp = pebs->bp; regs.sp = pebs->sp; if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs)) regs.flags |= PERF_EFLAGS_EXACT; else regs.flags &= ~PERF_EFLAGS_EXACT; if (perf_event_overflow(event, 1, &data, &regs)) x86_pmu_stop(event, 0); } Commit Message: perf: Remove the nmi parameter from the swevent and overflow interface The nmi parameter indicated if we could do wakeups from the current context, if not, we would set some state and self-IPI and let the resulting interrupt do the wakeup. For the various event classes: - hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from the PMI-tail (ARM etc.) - tracepoint: nmi=0; since tracepoint could be from NMI context. - software: nmi=[0,1]; some, like the schedule thing cannot perform wakeups, and hence need 0. As one can see, there is very little nmi=1 usage, and the down-side of not using it is that on some platforms some software events can have a jiffy delay in wakeup (when arch_irq_work_raise isn't implemented). The up-side however is that we can remove the nmi parameter and save a bunch of conditionals in fast paths. Signed-off-by: Peter Zijlstra <[email protected]> Cc: Michael Cree <[email protected]> Cc: Will Deacon <[email protected]> Cc: Deng-Cheng Zhu <[email protected]> Cc: Anton Blanchard <[email protected]> Cc: Eric B Munson <[email protected]> Cc: Heiko Carstens <[email protected]> Cc: Paul Mundt <[email protected]> Cc: David S. Miller <[email protected]> Cc: Frederic Weisbecker <[email protected]> Cc: Jason Wessel <[email protected]> Cc: Don Zickus <[email protected]> Link: http://lkml.kernel.org/n/[email protected] Signed-off-by: Ingo Molnar <[email protected]> CWE ID: CWE-399
static void __intel_pmu_pebs_event(struct perf_event *event, struct pt_regs *iregs, void *__pebs) { /* * We cast to pebs_record_core since that is a subset of * both formats and we don't use the other fields in this * routine. */ struct pebs_record_core *pebs = __pebs; struct perf_sample_data data; struct pt_regs regs; if (!intel_pmu_save_and_restart(event)) return; perf_sample_data_init(&data, 0); data.period = event->hw.last_period; /* * We use the interrupt regs as a base because the PEBS record * does not contain a full regs set, specifically it seems to * lack segment descriptors, which get used by things like * user_mode(). * * In the simple case fix up only the IP and BP,SP regs, for * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. * A possible PERF_SAMPLE_REGS will have to transfer all regs. */ regs = *iregs; regs.ip = pebs->ip; regs.bp = pebs->bp; regs.sp = pebs->sp; if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs)) regs.flags |= PERF_EFLAGS_EXACT; else regs.flags &= ~PERF_EFLAGS_EXACT; if (perf_event_overflow(event, &data, &regs)) x86_pmu_stop(event, 0); }
165,820
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: eap_print(netdissect_options *ndo, register const u_char *cp, u_int length) { const struct eap_frame_t *eap; const u_char *tptr; u_int tlen, type, subtype; int count=0, len; tptr = cp; tlen = length; eap = (const struct eap_frame_t *)cp; ND_TCHECK(*eap); /* in non-verbose mode just lets print the basic info */ if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); return; } ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); tptr += sizeof(const struct eap_frame_t); tlen -= sizeof(const struct eap_frame_t); switch (eap->type) { case EAP_FRAME_TYPE_PACKET: type = *(tptr); len = EXTRACT_16BITS(tptr+2); ND_PRINT((ndo, ", %s (%u), id %u, len %u", tok2str(eap_code_values, "unknown", type), type, *(tptr+1), len)); ND_TCHECK2(*tptr, len); if (type <= 2) { /* For EAP_REQUEST and EAP_RESPONSE only */ subtype = *(tptr+4); ND_PRINT((ndo, "\n\t\t Type %s (%u)", tok2str(eap_type_values, "unknown", *(tptr+4)), *(tptr + 4))); switch (subtype) { case EAP_TYPE_IDENTITY: if (len - 5 > 0) { ND_PRINT((ndo, ", Identity: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NOTIFICATION: if (len - 5 > 0) { ND_PRINT((ndo, ", Notification: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NAK: count = 5; /* * one or more octets indicating * the desired authentication * type one octet per type */ while (count < len) { ND_PRINT((ndo, " %s (%u),", tok2str(eap_type_values, "unknown", *(tptr+count)), *(tptr + count))); count++; } break; case EAP_TYPE_TTLS: ND_PRINT((ndo, " TTLSv%u", EAP_TTLS_VERSION(*(tptr + 5)))); /* fall through */ case EAP_TYPE_TLS: ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", *(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } break; case EAP_TYPE_FAST: ND_PRINT((ndo, " FASTv%u", EAP_TTLS_VERSION(*(tptr + 5)))); ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", *(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } /* FIXME - TLV attributes follow */ break; case EAP_TYPE_AKA: case EAP_TYPE_SIM: ND_PRINT((ndo, " subtype [%s] 0x%02x,", tok2str(eap_aka_subtype_values, "unknown", *(tptr+5)), *(tptr + 5))); /* FIXME - TLV attributes follow */ break; case EAP_TYPE_MD5_CHALLENGE: case EAP_TYPE_OTP: case EAP_TYPE_GTC: case EAP_TYPE_EXPANDED_TYPES: case EAP_TYPE_EXPERIMENTAL: default: break; } } break; case EAP_FRAME_TYPE_LOGOFF: case EAP_FRAME_TYPE_ENCAP_ASF_ALERT: default: break; } return; trunc: ND_PRINT((ndo, "\n\t[|EAP]")); } Commit Message: CVE-2017-13015/EAP: Add more bounds checks. This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s), modified so the capture file won't be rejected as an invalid capture. CWE ID: CWE-125
eap_print(netdissect_options *ndo, register const u_char *cp, u_int length) { const struct eap_frame_t *eap; const u_char *tptr; u_int tlen, type, subtype; int count=0, len; tptr = cp; tlen = length; eap = (const struct eap_frame_t *)cp; ND_TCHECK(*eap); /* in non-verbose mode just lets print the basic info */ if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); return; } ND_PRINT((ndo, "%s (%u) v%u, len %u", tok2str(eap_frame_type_values, "unknown", eap->type), eap->type, eap->version, EXTRACT_16BITS(eap->length))); tptr += sizeof(const struct eap_frame_t); tlen -= sizeof(const struct eap_frame_t); switch (eap->type) { case EAP_FRAME_TYPE_PACKET: ND_TCHECK_8BITS(tptr); type = *(tptr); ND_TCHECK_16BITS(tptr+2); len = EXTRACT_16BITS(tptr+2); ND_PRINT((ndo, ", %s (%u), id %u, len %u", tok2str(eap_code_values, "unknown", type), type, *(tptr+1), len)); ND_TCHECK2(*tptr, len); if (type <= 2) { /* For EAP_REQUEST and EAP_RESPONSE only */ ND_TCHECK_8BITS(tptr+4); subtype = *(tptr+4); ND_PRINT((ndo, "\n\t\t Type %s (%u)", tok2str(eap_type_values, "unknown", subtype), subtype)); switch (subtype) { case EAP_TYPE_IDENTITY: if (len - 5 > 0) { ND_PRINT((ndo, ", Identity: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NOTIFICATION: if (len - 5 > 0) { ND_PRINT((ndo, ", Notification: ")); safeputs(ndo, tptr + 5, len - 5); } break; case EAP_TYPE_NAK: count = 5; /* * one or more octets indicating * the desired authentication * type one octet per type */ while (count < len) { ND_TCHECK_8BITS(tptr+count); ND_PRINT((ndo, " %s (%u),", tok2str(eap_type_values, "unknown", *(tptr+count)), *(tptr + count))); count++; } break; case EAP_TYPE_TTLS: case EAP_TYPE_TLS: ND_TCHECK_8BITS(tptr + 5); if (subtype == EAP_TYPE_TTLS) ND_PRINT((ndo, " TTLSv%u", EAP_TTLS_VERSION(*(tptr + 5)))); ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", *(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_TCHECK_32BITS(tptr + 6); ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } break; case EAP_TYPE_FAST: ND_TCHECK_8BITS(tptr + 5); ND_PRINT((ndo, " FASTv%u", EAP_TTLS_VERSION(*(tptr + 5)))); ND_PRINT((ndo, " flags [%s] 0x%02x,", bittok2str(eap_tls_flags_values, "none", *(tptr+5)), *(tptr + 5))); if (EAP_TLS_EXTRACT_BIT_L(*(tptr+5))) { ND_TCHECK_32BITS(tptr + 6); ND_PRINT((ndo, " len %u", EXTRACT_32BITS(tptr + 6))); } /* FIXME - TLV attributes follow */ break; case EAP_TYPE_AKA: case EAP_TYPE_SIM: ND_TCHECK_8BITS(tptr + 5); ND_PRINT((ndo, " subtype [%s] 0x%02x,", tok2str(eap_aka_subtype_values, "unknown", *(tptr+5)), *(tptr + 5))); /* FIXME - TLV attributes follow */ break; case EAP_TYPE_MD5_CHALLENGE: case EAP_TYPE_OTP: case EAP_TYPE_GTC: case EAP_TYPE_EXPANDED_TYPES: case EAP_TYPE_EXPERIMENTAL: default: break; } } break; case EAP_FRAME_TYPE_LOGOFF: case EAP_FRAME_TYPE_ENCAP_ASF_ALERT: default: break; } return; trunc: ND_PRINT((ndo, "\n\t[|EAP]")); }
167,877
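The tcpdump fix above inserts ND_TCHECK-style bounds checks before every field read. Stripped of tcpdump's macro machinery, the underlying guard is just a remaining-length comparison, sketched here with hypothetical helper names.

```cpp
#include <cstddef>
#include <cstdint>

// "Is it safe to read n bytes at p?" Assumes p <= end. Comparing against the
// remaining length (rather than computing p + n first) keeps the check itself
// from overflowing past `end`.
bool CanRead(const uint8_t* p, const uint8_t* end, size_t n) {
  return static_cast<size_t>(end - p) >= n;
}

// Big-endian 16-bit read; only call after CanRead(p, end, 2) succeeded.
uint16_t ReadBE16(const uint8_t* p) {
  return static_cast<uint16_t>((p[0] << 8) | p[1]);
}
```

A caller would test CanRead(tptr, end, 2) before ReadBE16(tptr), which mirrors how the fixed printer validates each field before dereferencing it.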
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: IPV6BuildTestPacket(uint32_t id, uint16_t off, int mf, const char content, int content_len) { Packet *p = NULL; uint8_t *pcontent; IPV6Hdr ip6h; p = SCCalloc(1, sizeof(*p) + default_packet_size); if (unlikely(p == NULL)) return NULL; PACKET_INITIALIZE(p); gettimeofday(&p->ts, NULL); ip6h.s_ip6_nxt = 44; ip6h.s_ip6_hlim = 2; /* Source and dest address - very bogus addresses. */ ip6h.s_ip6_src[0] = 0x01010101; ip6h.s_ip6_src[1] = 0x01010101; ip6h.s_ip6_src[2] = 0x01010101; ip6h.s_ip6_src[3] = 0x01010101; ip6h.s_ip6_dst[0] = 0x02020202; ip6h.s_ip6_dst[1] = 0x02020202; ip6h.s_ip6_dst[2] = 0x02020202; ip6h.s_ip6_dst[3] = 0x02020202; /* copy content_len crap, we need full length */ PacketCopyData(p, (uint8_t *)&ip6h, sizeof(IPV6Hdr)); p->ip6h = (IPV6Hdr *)GET_PKT_DATA(p); IPV6_SET_RAW_VER(p->ip6h, 6); /* Fragmentation header. */ IPV6FragHdr *fh = (IPV6FragHdr *)(GET_PKT_DATA(p) + sizeof(IPV6Hdr)); fh->ip6fh_nxt = IPPROTO_ICMP; fh->ip6fh_ident = htonl(id); fh->ip6fh_offlg = htons((off << 3) | mf); DecodeIPV6FragHeader(p, (uint8_t *)fh, 8, 8 + content_len, 0); pcontent = SCCalloc(1, content_len); if (unlikely(pcontent == NULL)) return NULL; memset(pcontent, content, content_len); PacketCopyDataOffset(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr), pcontent, content_len); SET_PKT_LEN(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr) + content_len); SCFree(pcontent); p->ip6h->s_ip6_plen = htons(sizeof(IPV6FragHdr) + content_len); SET_IPV6_SRC_ADDR(p, &p->src); SET_IPV6_DST_ADDR(p, &p->dst); /* Self test. */ if (IPV6_GET_VER(p) != 6) goto error; if (IPV6_GET_NH(p) != 44) goto error; if (IPV6_GET_PLEN(p) != sizeof(IPV6FragHdr) + content_len) goto error; return p; error: fprintf(stderr, "Error building test packet.\n"); if (p != NULL) SCFree(p); return NULL; } Commit Message: defrag - take protocol into account during re-assembly The IP protocol was not being used to match fragments with their packets allowing a carefully constructed packet with a different protocol to be matched, allowing re-assembly to complete, creating a packet that would not be re-assembled by the destination host. CWE ID: CWE-358
IPV6BuildTestPacket(uint32_t id, uint16_t off, int mf, const char content, IPV6BuildTestPacket(uint8_t proto, uint32_t id, uint16_t off, int mf, const char content, int content_len) { Packet *p = NULL; uint8_t *pcontent; IPV6Hdr ip6h; p = SCCalloc(1, sizeof(*p) + default_packet_size); if (unlikely(p == NULL)) return NULL; PACKET_INITIALIZE(p); gettimeofday(&p->ts, NULL); ip6h.s_ip6_nxt = 44; ip6h.s_ip6_hlim = 2; /* Source and dest address - very bogus addresses. */ ip6h.s_ip6_src[0] = 0x01010101; ip6h.s_ip6_src[1] = 0x01010101; ip6h.s_ip6_src[2] = 0x01010101; ip6h.s_ip6_src[3] = 0x01010101; ip6h.s_ip6_dst[0] = 0x02020202; ip6h.s_ip6_dst[1] = 0x02020202; ip6h.s_ip6_dst[2] = 0x02020202; ip6h.s_ip6_dst[3] = 0x02020202; /* copy content_len crap, we need full length */ PacketCopyData(p, (uint8_t *)&ip6h, sizeof(IPV6Hdr)); p->ip6h = (IPV6Hdr *)GET_PKT_DATA(p); IPV6_SET_RAW_VER(p->ip6h, 6); /* Fragmentation header. */ IPV6FragHdr *fh = (IPV6FragHdr *)(GET_PKT_DATA(p) + sizeof(IPV6Hdr)); fh->ip6fh_nxt = proto; fh->ip6fh_ident = htonl(id); fh->ip6fh_offlg = htons((off << 3) | mf); DecodeIPV6FragHeader(p, (uint8_t *)fh, 8, 8 + content_len, 0); pcontent = SCCalloc(1, content_len); if (unlikely(pcontent == NULL)) return NULL; memset(pcontent, content, content_len); PacketCopyDataOffset(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr), pcontent, content_len); SET_PKT_LEN(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr) + content_len); SCFree(pcontent); p->ip6h->s_ip6_plen = htons(sizeof(IPV6FragHdr) + content_len); SET_IPV6_SRC_ADDR(p, &p->src); SET_IPV6_DST_ADDR(p, &p->dst); /* Self test. */ if (IPV6_GET_VER(p) != 6) goto error; if (IPV6_GET_NH(p) != 44) goto error; if (IPV6_GET_PLEN(p) != sizeof(IPV6FragHdr) + content_len) goto error; return p; error: fprintf(stderr, "Error building test packet.\n"); if (p != NULL) SCFree(p); return NULL; }
168,307
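The Suricata fix above threads the IP protocol into the fragment test packets so that reassembly only matches fragments of the same protocol. A sketch of the idea as a fragment-tracker key (a hypothetical struct, not Suricata's own tracker):

```cpp
#include <cstdint>
#include <tuple>

// Key used to group fragments for reassembly. Without the `proto` field, a
// crafted fragment carrying a different protocol could complete another
// flow's reassembly (the CWE-358 evasion the commit describes).
struct FragKey {
  uint32_t src;
  uint32_t dst;
  uint32_t id;
  uint8_t proto;

  bool operator==(const FragKey& o) const {
    return std::tie(src, dst, id, proto) ==
           std::tie(o.src, o.dst, o.id, o.proto);
  }
};
```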
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ip_options_build(struct sk_buff * skb, struct ip_options * opt, __be32 daddr, struct rtable *rt, int is_frag) { unsigned char *iph = skb_network_header(skb); memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options)); memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen); opt = &(IPCB(skb)->opt); if (opt->srr) memcpy(iph+opt->srr+iph[opt->srr+1]-4, &daddr, 4); if (!is_frag) { if (opt->rr_needaddr) ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, rt); if (opt->ts_needaddr) ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt); if (opt->ts_needtime) { struct timespec tv; __be32 midtime; getnstimeofday(&tv); midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4); } return; } if (opt->rr) { memset(iph+opt->rr, IPOPT_NOP, iph[opt->rr+1]); opt->rr = 0; opt->rr_needaddr = 0; } if (opt->ts) { memset(iph+opt->ts, IPOPT_NOP, iph[opt->ts+1]); opt->ts = 0; opt->ts_needaddr = opt->ts_needtime = 0; } } Commit Message: inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <[email protected]> Cc: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-362
void ip_options_build(struct sk_buff * skb, struct ip_options * opt, void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag) { unsigned char *iph = skb_network_header(skb); memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options)); memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen); opt = &(IPCB(skb)->opt); if (opt->srr) memcpy(iph+opt->srr+iph[opt->srr+1]-4, &daddr, 4); if (!is_frag) { if (opt->rr_needaddr) ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, rt); if (opt->ts_needaddr) ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt); if (opt->ts_needtime) { struct timespec tv; __be32 midtime; getnstimeofday(&tv); midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4); } return; } if (opt->rr) { memset(iph+opt->rr, IPOPT_NOP, iph[opt->rr+1]); opt->rr = 0; opt->rr_needaddr = 0; } if (opt->ts) { memset(iph+opt->ts, IPOPT_NOP, iph[opt->ts+1]); opt->ts = 0; opt->ts_needaddr = opt->ts_needtime = 0; } }
165,556
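The kernel commit above moves inet->opt under RCU so that a writer swapping the options cannot free them while another thread is still reading. A loose user-space analogue, not the kernel API and requiring C++20 for std::atomic<std::shared_ptr>, is sketched below: readers take their own reference, writers publish a replacement, and the old object is destroyed only once the last reference drops.

```cpp
#include <atomic>
#include <memory>
#include <string>
#include <utility>

struct IpOptions {
  std::string data;
};

// Current options; readers and the writer may run concurrently.
std::atomic<std::shared_ptr<const IpOptions>> g_options;

void PublishOptions(std::string new_data) {
  // Build the replacement off to the side, then publish it atomically.
  g_options.store(std::make_shared<const IpOptions>(IpOptions{std::move(new_data)}));
}

std::shared_ptr<const IpOptions> SnapshotOptions() {
  // The snapshot keeps the object alive even if PublishOptions() swaps the
  // pointer concurrently, so the reader never sees freed memory.
  return g_options.load();
}
```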
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void usage() { fprintf (stderr, "PNM2PNG\n"); fprintf (stderr, " by Willem van Schaik, 1999\n"); #ifdef __TURBOC__ fprintf (stderr, " for Turbo-C and Borland-C compilers\n"); #else fprintf (stderr, " for Linux (and Unix) compilers\n"); #endif fprintf (stderr, "Usage: pnm2png [options] <file>.<pnm> [<file>.png]\n"); fprintf (stderr, " or: ... | pnm2png [options]\n"); fprintf (stderr, "Options:\n"); fprintf (stderr, " -i[nterlace] write png-file with interlacing on\n"); fprintf (stderr, " -a[lpha] <file>.pgm read PNG alpha channel as pgm-file\n"); fprintf (stderr, " -h | -? print this help-information\n"); } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
void usage() { fprintf (stderr, "PNM2PNG\n"); fprintf (stderr, " by Willem van Schaik, 1999\n"); #ifdef __TURBOC__ fprintf (stderr, " for Turbo-C and Borland-C compilers\n"); #else fprintf (stderr, " for Linux (and Unix) compilers\n"); #endif fprintf (stderr, "Usage: pnm2png [options] <file>.<pnm> [<file>.png]\n"); fprintf (stderr, " or: ... | pnm2png [options]\n"); fprintf (stderr, "Options:\n"); fprintf (stderr, " -i[nterlace] write png-file with interlacing on\n"); fprintf (stderr, " -a[lpha] <file>.pgm read PNG alpha channel as pgm-file\n"); fprintf (stderr, " -h | -? print this help-information\n"); }
173,726
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: long long mkvparser::GetUIntLength(IMkvReader* pReader, long long pos, long& len) { assert(pReader); assert(pos >= 0); long long total, available; int status = pReader->Length(&total, &available); assert(status >= 0); assert((total < 0) || (available <= total)); len = 1; if (pos >= available) return pos; // too few bytes available unsigned char b; status = pReader->Read(pos, 1, &b); if (status < 0) return status; assert(status == 0); if (b == 0) // we can't handle u-int values larger than 8 bytes return E_FILE_FORMAT_INVALID; unsigned char m = 0x80; while (!(b & m)) { m >>= 1; ++len; } return 0; // success } Commit Message: external/libvpx/libwebm: Update snapshot Update libwebm snapshot. This update contains security fixes from upstream. Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b BUG=23167726 Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207 (cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a) CWE ID: CWE-20
long long mkvparser::GetUIntLength(IMkvReader* pReader, long long pos, long long ReadID(IMkvReader* pReader, long long pos, long& len) { const long long id = ReadUInt(pReader, pos, len); if (id < 0 || len < 1 || len > 4) { // An ID must be at least 1 byte long, and cannot exceed 4. // See EBMLMaxIDLength: http://www.matroska.org/technical/specs/index.html return E_FILE_FORMAT_INVALID; } return id; } long long GetUIntLength(IMkvReader* pReader, long long pos, long& len) { if (!pReader || pos < 0) return E_FILE_FORMAT_INVALID; long long total, available; int status = pReader->Length(&total, &available); if (status < 0 || (total >= 0 && available > total)) return E_FILE_FORMAT_INVALID; len = 1; if (pos >= available) return pos; // too few bytes available unsigned char b; status = pReader->Read(pos, 1, &b); if (status != 0) return status; if (b == 0) // we can't handle u-int values larger than 8 bytes return E_FILE_FORMAT_INVALID; unsigned char m = 0x80; while (!(b & m)) { m >>= 1; ++len; } return 0; // success }
173,824
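The fixed GetUIntLength/ReadID above converts asserts into error returns and rejects EBML IDs outside 1 to 4 bytes. The core length computation is the highest-set-bit scan sketched below (a stand-alone illustration, not the libwebm function itself).

```cpp
#include <cstdint>

// EBML variable-length-integer width from its first byte: the position of the
// highest set bit encodes how many bytes the field occupies (1..8). A zero
// first byte would mean "more than 8 bytes", which the parser reports as an
// error instead of asserting.
int VintLength(uint8_t first_byte) {
  if (first_byte == 0)
    return -1;  // invalid / unsupported width
  int len = 1;
  for (uint8_t mask = 0x80; (first_byte & mask) == 0; mask >>= 1)
    ++len;
  return len;  // 1..8
}
```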
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void Segment::PreloadCluster(Cluster* pCluster, ptrdiff_t idx) { assert(pCluster); assert(pCluster->m_index < 0); assert(idx >= m_clusterCount); const long count = m_clusterCount + m_clusterPreloadCount; long& size = m_clusterSize; assert(size >= count); if (count >= size) { const long n = (size <= 0) ? 2048 : 2 * size; Cluster** const qq = new Cluster* [n]; Cluster** q = qq; Cluster** p = m_clusters; Cluster** const pp = p + count; while (p != pp) *q++ = *p++; delete[] m_clusters; m_clusters = qq; size = n; } assert(m_clusters); Cluster** const p = m_clusters + idx; Cluster** q = m_clusters + count; assert(q >= p); assert(q < (m_clusters + size)); while (q > p) { Cluster** const qq = q - 1; assert((*qq)->m_index < 0); *q = *qq; q = qq; } m_clusters[idx] = pCluster; ++m_clusterPreloadCount; } Commit Message: external/libvpx/libwebm: Update snapshot Update libwebm snapshot. This update contains security fixes from upstream. Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b BUG=23167726 Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207 (cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a) CWE ID: CWE-20
void Segment::PreloadCluster(Cluster* pCluster, ptrdiff_t idx) { bool Segment::PreloadCluster(Cluster* pCluster, ptrdiff_t idx) { assert(pCluster); assert(pCluster->m_index < 0); assert(idx >= m_clusterCount); const long count = m_clusterCount + m_clusterPreloadCount; long& size = m_clusterSize; assert(size >= count); if (count >= size) { const long n = (size <= 0) ? 2048 : 2 * size; Cluster** const qq = new (std::nothrow) Cluster*[n]; if (qq == NULL) return false; Cluster** q = qq; Cluster** p = m_clusters; Cluster** const pp = p + count; while (p != pp) *q++ = *p++; delete[] m_clusters; m_clusters = qq; size = n; } assert(m_clusters); Cluster** const p = m_clusters + idx; Cluster** q = m_clusters + count; assert(q >= p); assert(q < (m_clusters + size)); while (q > p) { Cluster** const qq = q - 1; assert((*qq)->m_index < 0); *q = *qq; q = qq; } m_clusters[idx] = pCluster; ++m_clusterPreloadCount; return true; }
173,860
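The PreloadCluster fix above switches the array growth to new (std::nothrow) and returns bool, so an allocation failure on a hostile file becomes a parse error rather than an abort or an unhandled exception. A generic sketch of that growth pattern:

```cpp
#include <new>

// Grow a raw pointer array, reporting failure to the caller instead of
// terminating. `slots` and `capacity` are updated only on success.
template <typename T>
bool GrowArray(T**& slots, long& capacity, long count) {
  const long new_cap = (capacity <= 0) ? 16 : 2 * capacity;
  T** grown = new (std::nothrow) T*[new_cap];
  if (grown == nullptr)
    return false;  // caller turns this into a parse error
  for (long i = 0; i < count; ++i)
    grown[i] = slots[i];
  delete[] slots;
  slots = grown;
  capacity = new_cap;
  return true;
}
```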
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void board_init_f_init_reserve(ulong base) { struct global_data *gd_ptr; /* * clear GD entirely and set it up. * Use gd_ptr, as gd may not be properly set yet. */ gd_ptr = (struct global_data *)base; /* zero the area */ memset(gd_ptr, '\0', sizeof(*gd)); /* set GD unless architecture did it already */ #if !defined(CONFIG_ARM) arch_setup_gd(gd_ptr); #endif if (CONFIG_IS_ENABLED(SYS_REPORT_STACK_F_USAGE)) board_init_f_init_stack_protection_addr(base); /* next alloc will be higher by one GD plus 16-byte alignment */ base += roundup(sizeof(struct global_data), 16); /* * record early malloc arena start. * Use gd as it is now properly set for all architectures. */ #if CONFIG_VAL(SYS_MALLOC_F_LEN) /* go down one 'early malloc arena' */ gd->malloc_base = base; /* next alloc will be higher by one 'early malloc arena' size */ base += CONFIG_VAL(SYS_MALLOC_F_LEN); #endif if (CONFIG_IS_ENABLED(SYS_REPORT_STACK_F_USAGE)) board_init_f_init_stack_protection(); } Commit Message: Merge branch '2020-01-22-master-imports' - Re-add U8500 platform support - Add bcm968360bg support - Assorted Keymile fixes - Other assorted bugfixes CWE ID: CWE-787
void board_init_f_init_reserve(ulong base) { struct global_data *gd_ptr; /* * clear GD entirely and set it up. * Use gd_ptr, as gd may not be properly set yet. */ gd_ptr = (struct global_data *)base; /* zero the area */ memset(gd_ptr, '\0', sizeof(*gd)); /* set GD unless architecture did it already */ #if !defined(CONFIG_ARM) arch_setup_gd(gd_ptr); #endif if (CONFIG_IS_ENABLED(SYS_REPORT_STACK_F_USAGE)) board_init_f_init_stack_protection_addr(base); /* next alloc will be higher by one GD plus 16-byte alignment */ base += roundup(sizeof(struct global_data), 16); /* * record early malloc arena start. * Use gd as it is now properly set for all architectures. */ #if CONFIG_VAL(SYS_MALLOC_F_LEN) /* go down one 'early malloc arena' */ gd->malloc_base = base; #endif if (CONFIG_IS_ENABLED(SYS_REPORT_STACK_F_USAGE)) board_init_f_init_stack_protection(); }
169,639
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int blkcg_init_queue(struct request_queue *q) { struct blkcg_gq *new_blkg, *blkg; bool preloaded; int ret; new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); if (!new_blkg) return -ENOMEM; preloaded = !radix_tree_preload(GFP_KERNEL); /* * Make sure the root blkg exists and count the existing blkgs. As * @q is bypassing at this point, blkg_lookup_create() can't be * used. Open code insertion. */ rcu_read_lock(); spin_lock_irq(q->queue_lock); blkg = blkg_create(&blkcg_root, q, new_blkg); spin_unlock_irq(q->queue_lock); rcu_read_unlock(); if (preloaded) radix_tree_preload_end(); if (IS_ERR(blkg)) { blkg_free(new_blkg); return PTR_ERR(blkg); } q->root_blkg = blkg; q->root_rl.blkg = blkg; ret = blk_throtl_init(q); if (ret) { spin_lock_irq(q->queue_lock); blkg_destroy_all(q); spin_unlock_irq(q->queue_lock); } return ret; } Commit Message: blkcg: fix double free of new_blkg in blkcg_init_queue If blkg_create fails, new_blkg passed as an argument will be freed by blkg_create, so there is no need to free it again. Signed-off-by: Hou Tao <[email protected]> Signed-off-by: Jens Axboe <[email protected]> CWE ID: CWE-415
int blkcg_init_queue(struct request_queue *q) { struct blkcg_gq *new_blkg, *blkg; bool preloaded; int ret; new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); if (!new_blkg) return -ENOMEM; preloaded = !radix_tree_preload(GFP_KERNEL); /* * Make sure the root blkg exists and count the existing blkgs. As * @q is bypassing at this point, blkg_lookup_create() can't be * used. Open code insertion. */ rcu_read_lock(); spin_lock_irq(q->queue_lock); blkg = blkg_create(&blkcg_root, q, new_blkg); spin_unlock_irq(q->queue_lock); rcu_read_unlock(); if (preloaded) radix_tree_preload_end(); if (IS_ERR(blkg)) return PTR_ERR(blkg); q->root_blkg = blkg; q->root_rl.blkg = blkg; ret = blk_throtl_init(q); if (ret) { spin_lock_irq(q->queue_lock); blkg_destroy_all(q); spin_unlock_irq(q->queue_lock); } return ret; }
169,318
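The blkcg fix above removes the caller-side blkg_free() because blkg_create() already frees the passed-in blkg on failure, a classic unclear-ownership double free. Expressed with unique_ptr (a general sketch, not kernel code), the ownership transfer is explicit and the second free cannot be written by accident.

```cpp
#include <memory>

struct Resource {
  int id = 0;
};

// Takes ownership unconditionally: even on failure this function is
// responsible for destroying the object, just as blkg_create() frees
// new_blkg in the row above.
int Consume(std::unique_ptr<Resource> r) {
  (void)r;
  return -1;  // pretend creation failed; r is destroyed here, exactly once
}

int Caller() {
  auto r = std::make_unique<Resource>();
  int ret = Consume(std::move(r));
  // r is now empty, so there is nothing left for the caller to free and the
  // accidental second free from the original bug cannot be expressed.
  return ret;
}
```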
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: RTCSessionDescriptionRequestSuccededTask(MockWebRTCPeerConnectionHandler* object, const WebKit::WebRTCSessionDescriptionRequest& request, const WebKit::WebRTCSessionDescriptionDescriptor& result) : MethodTask<MockWebRTCPeerConnectionHandler>(object) , m_request(request) , m_result(result) { } Commit Message: Unreviewed, rolling out r127612, r127660, and r127664. http://trac.webkit.org/changeset/127612 http://trac.webkit.org/changeset/127660 http://trac.webkit.org/changeset/127664 https://bugs.webkit.org/show_bug.cgi?id=95920 Source/Platform: * Platform.gypi: * chromium/public/WebRTCPeerConnectionHandler.h: (WebKit): (WebRTCPeerConnectionHandler): * chromium/public/WebRTCVoidRequest.h: Removed. Source/WebCore: * CMakeLists.txt: * GNUmakefile.list.am: * Modules/mediastream/RTCErrorCallback.h: (WebCore): (RTCErrorCallback): * Modules/mediastream/RTCErrorCallback.idl: * Modules/mediastream/RTCPeerConnection.cpp: (WebCore::RTCPeerConnection::createOffer): * Modules/mediastream/RTCPeerConnection.h: (WebCore): (RTCPeerConnection): * Modules/mediastream/RTCPeerConnection.idl: * Modules/mediastream/RTCSessionDescriptionCallback.h: (WebCore): (RTCSessionDescriptionCallback): * Modules/mediastream/RTCSessionDescriptionCallback.idl: * Modules/mediastream/RTCSessionDescriptionRequestImpl.cpp: (WebCore::RTCSessionDescriptionRequestImpl::create): (WebCore::RTCSessionDescriptionRequestImpl::RTCSessionDescriptionRequestImpl): (WebCore::RTCSessionDescriptionRequestImpl::requestSucceeded): (WebCore::RTCSessionDescriptionRequestImpl::requestFailed): (WebCore::RTCSessionDescriptionRequestImpl::clear): * Modules/mediastream/RTCSessionDescriptionRequestImpl.h: (RTCSessionDescriptionRequestImpl): * Modules/mediastream/RTCVoidRequestImpl.cpp: Removed. * Modules/mediastream/RTCVoidRequestImpl.h: Removed. * WebCore.gypi: * platform/chromium/support/WebRTCVoidRequest.cpp: Removed. * platform/mediastream/RTCPeerConnectionHandler.cpp: (RTCPeerConnectionHandlerDummy): (WebCore::RTCPeerConnectionHandlerDummy::RTCPeerConnectionHandlerDummy): * platform/mediastream/RTCPeerConnectionHandler.h: (WebCore): (WebCore::RTCPeerConnectionHandler::~RTCPeerConnectionHandler): (RTCPeerConnectionHandler): (WebCore::RTCPeerConnectionHandler::RTCPeerConnectionHandler): * platform/mediastream/RTCVoidRequest.h: Removed. * platform/mediastream/chromium/RTCPeerConnectionHandlerChromium.cpp: * platform/mediastream/chromium/RTCPeerConnectionHandlerChromium.h: (RTCPeerConnectionHandlerChromium): Tools: * DumpRenderTree/chromium/MockWebRTCPeerConnectionHandler.cpp: (MockWebRTCPeerConnectionHandler::SuccessCallbackTask::SuccessCallbackTask): (MockWebRTCPeerConnectionHandler::SuccessCallbackTask::runIfValid): (MockWebRTCPeerConnectionHandler::FailureCallbackTask::FailureCallbackTask): (MockWebRTCPeerConnectionHandler::FailureCallbackTask::runIfValid): (MockWebRTCPeerConnectionHandler::createOffer): * DumpRenderTree/chromium/MockWebRTCPeerConnectionHandler.h: (MockWebRTCPeerConnectionHandler): (SuccessCallbackTask): (FailureCallbackTask): LayoutTests: * fast/mediastream/RTCPeerConnection-createOffer.html: * fast/mediastream/RTCPeerConnection-localDescription-expected.txt: Removed. * fast/mediastream/RTCPeerConnection-localDescription.html: Removed. * fast/mediastream/RTCPeerConnection-remoteDescription-expected.txt: Removed. * fast/mediastream/RTCPeerConnection-remoteDescription.html: Removed. git-svn-id: svn://svn.chromium.org/blink/trunk@127679 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-20
RTCSessionDescriptionRequestSuccededTask(MockWebRTCPeerConnectionHandler* object, const WebKit::WebRTCSessionDescriptionRequest& request, const WebKit::WebRTCSessionDescriptionDescriptor& result)
170,357
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void EnterpriseEnrollmentScreen::OnAuthCancelled() { UMA_HISTOGRAM_ENUMERATION(policy::kMetricEnrollment, policy::kMetricEnrollmentCancelled, policy::kMetricEnrollmentSize); auth_fetcher_.reset(); registrar_.reset(); g_browser_process->browser_policy_connector()->DeviceStopAutoRetry(); get_screen_observer()->OnExit( ScreenObserver::ENTERPRISE_ENROLLMENT_CANCELLED); } Commit Message: Reset the device policy machinery upon retrying enrollment. BUG=chromium-os:18208 TEST=See bug description Review URL: http://codereview.chromium.org/7676005 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@97615 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
void EnterpriseEnrollmentScreen::OnAuthCancelled() { UMA_HISTOGRAM_ENUMERATION(policy::kMetricEnrollment, policy::kMetricEnrollmentCancelled, policy::kMetricEnrollmentSize); auth_fetcher_.reset(); registrar_.reset(); g_browser_process->browser_policy_connector()->ResetDevicePolicy(); get_screen_observer()->OnExit( ScreenObserver::ENTERPRISE_ENROLLMENT_CANCELLED); }
170,276
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: SplashError Splash::drawImage(SplashImageSource src, void *srcData, SplashColorMode srcMode, GBool srcAlpha, int w, int h, SplashCoord *mat) { SplashPipe pipe; GBool ok, rot; SplashCoord xScale, yScale, xShear, yShear, yShear1; int tx, tx2, ty, ty2, scaledWidth, scaledHeight, xSign, ySign; int ulx, uly, llx, lly, urx, ury, lrx, lry; int ulx1, uly1, llx1, lly1, urx1, ury1, lrx1, lry1; int xMin, xMax, yMin, yMax; SplashClipResult clipRes, clipRes2; int yp, yq, yt, yStep, lastYStep; int xp, xq, xt, xStep, xSrc; int k1, spanXMin, spanXMax, spanY; SplashColorPtr colorBuf, p; SplashColor pix; Guchar *alphaBuf, *q; #if SPLASH_CMYK int pixAcc0, pixAcc1, pixAcc2, pixAcc3; #else int pixAcc0, pixAcc1, pixAcc2; #endif int alphaAcc; SplashCoord pixMul, alphaMul, alpha; int x, y, x1, x2, y2; SplashCoord y1; int nComps, n, m, i, j; if (debugMode) { printf("drawImage: srcMode=%d srcAlpha=%d w=%d h=%d mat=[%.2f %.2f %.2f %.2f %.2f %.2f]\n", srcMode, srcAlpha, w, h, (double)mat[0], (double)mat[1], (double)mat[2], (double)mat[3], (double)mat[4], (double)mat[5]); } ok = gFalse; // make gcc happy nComps = 0; // make gcc happy switch (bitmap->mode) { case splashModeMono1: case splashModeMono8: ok = srcMode == splashModeMono8; nComps = 1; break; case splashModeRGB8: ok = srcMode == splashModeRGB8; nComps = 3; break; case splashModeXBGR8: ok = srcMode == splashModeXBGR8; nComps = 4; break; case splashModeBGR8: ok = srcMode == splashModeBGR8; nComps = 3; break; #if SPLASH_CMYK case splashModeCMYK8: ok = srcMode == splashModeCMYK8; nComps = 4; break; #endif } if (!ok) { return splashErrModeMismatch; } if (splashAbs(mat[0] * mat[3] - mat[1] * mat[2]) < 0.000001) { return splashErrSingularMatrix; } rot = splashAbs(mat[1]) > splashAbs(mat[0]); if (rot) { xScale = -mat[1]; yScale = mat[2] - (mat[0] * mat[3]) / mat[1]; xShear = -mat[3] / yScale; yShear = -mat[0] / mat[1]; } else { xScale = mat[0]; yScale = mat[3] - (mat[1] * mat[2]) / mat[0]; xShear = mat[2] / yScale; yShear = mat[1] / mat[0]; } if (xScale >= 0) { tx = splashFloor(mat[4] - 0.01); tx2 = splashFloor(mat[4] + xScale + 0.01); } else { tx = splashFloor(mat[4] + 0.01); tx2 = splashFloor(mat[4] + xScale - 0.01); } scaledWidth = abs(tx2 - tx) + 1; if (yScale >= 0) { ty = splashFloor(mat[5] - 0.01); ty2 = splashFloor(mat[5] + yScale + 0.01); } else { ty = splashFloor(mat[5] + 0.01); ty2 = splashFloor(mat[5] + yScale - 0.01); } scaledHeight = abs(ty2 - ty) + 1; xSign = (xScale < 0) ? -1 : 1; ySign = (yScale < 0) ? -1 : 1; yShear1 = (SplashCoord)xSign * yShear; ulx1 = 0; uly1 = 0; urx1 = xSign * (scaledWidth - 1); ury1 = (int)(yShear * urx1); llx1 = splashRound(xShear * ySign * (scaledHeight - 1)); lly1 = ySign * (scaledHeight - 1) + (int)(yShear * llx1); lrx1 = xSign * (scaledWidth - 1) + splashRound(xShear * ySign * (scaledHeight - 1)); lry1 = ySign * (scaledHeight - 1) + (int)(yShear * lrx1); if (rot) { ulx = tx + uly1; uly = ty - ulx1; urx = tx + ury1; ury = ty - urx1; llx = tx + lly1; lly = ty - llx1; lrx = tx + lry1; lry = ty - lrx1; } else { ulx = tx + ulx1; uly = ty + uly1; urx = tx + urx1; ury = ty + ury1; llx = tx + llx1; lly = ty + lly1; lrx = tx + lrx1; lry = ty + lry1; } xMin = (ulx < urx) ? (ulx < llx) ? (ulx < lrx) ? ulx : lrx : (llx < lrx) ? llx : lrx : (urx < llx) ? (urx < lrx) ? urx : lrx : (llx < lrx) ? llx : lrx; xMax = (ulx > urx) ? (ulx > llx) ? (ulx > lrx) ? ulx : lrx : (llx > lrx) ? llx : lrx : (urx > llx) ? (urx > lrx) ? urx : lrx : (llx > lrx) ? llx : lrx; yMin = (uly < ury) ? (uly < lly) ? (uly < lry) ? uly : lry : (lly < lry) ? 
lly : lry : (ury < lly) ? (ury < lry) ? ury : lry : (lly < lry) ? lly : lry; yMax = (uly > ury) ? (uly > lly) ? (uly > lry) ? uly : lry : (lly > lry) ? lly : lry : (ury > lly) ? (ury > lry) ? ury : lry : (lly > lry) ? lly : lry; clipRes = state->clip->testRect(xMin, yMin, xMax, yMax); opClipRes = clipRes; if (clipRes == splashClipAllOutside) { return splashOk; } yp = h / scaledHeight; yq = h % scaledHeight; xp = w / scaledWidth; xq = w % scaledWidth; colorBuf = (SplashColorPtr)gmalloc((yp + 1) * w * nComps); if (srcAlpha) { alphaBuf = (Guchar *)gmalloc((yp + 1) * w); } else { alphaBuf = NULL; } pixAcc0 = pixAcc1 = pixAcc2 = 0; // make gcc happy #if SPLASH_CMYK pixAcc3 = 0; // make gcc happy #endif pipeInit(&pipe, 0, 0, NULL, pix, state->fillAlpha, srcAlpha || (vectorAntialias && clipRes != splashClipAllInside), gFalse); if (vectorAntialias) { drawAAPixelInit(); } if (srcAlpha) { yt = 0; lastYStep = 1; for (y = 0; y < scaledHeight; ++y) { yStep = yp; yt += yq; if (yt >= scaledHeight) { yt -= scaledHeight; ++yStep; } n = (yp > 0) ? yStep : lastYStep; if (n > 0) { p = colorBuf; q = alphaBuf; for (i = 0; i < n; ++i) { (*src)(srcData, p, q); p += w * nComps; q += w; } } lastYStep = yStep; k1 = splashRound(xShear * ySign * y); if (clipRes != splashClipAllInside && !rot && (int)(yShear * k1) == (int)(yShear * (xSign * (scaledWidth - 1) + k1))) { if (xSign > 0) { spanXMin = tx + k1; spanXMax = spanXMin + (scaledWidth - 1); } else { spanXMax = tx + k1; spanXMin = spanXMax - (scaledWidth - 1); } spanY = ty + ySign * y + (int)(yShear * k1); clipRes2 = state->clip->testSpan(spanXMin, spanXMax, spanY); if (clipRes2 == splashClipAllOutside) { continue; } } else { clipRes2 = clipRes; } xt = 0; xSrc = 0; x1 = k1; y1 = (SplashCoord)ySign * y + yShear * x1; if (yShear1 < 0) { y1 += 0.999; } n = yStep > 0 ? yStep : 1; switch (srcMode) { case splashModeMono1: case splashModeMono8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; alphaAcc = 0; p = colorBuf + xSrc; q = alphaBuf + xSrc; pixAcc0 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; alphaAcc += *q++; } p += w - m; q += w - m; } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); alphaMul = pixMul * (1.0 / 255.0); alpha = (SplashCoord)alphaAcc * alphaMul; if (alpha > 0) { pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pipe.shape = alpha; if (vectorAntialias && clipRes != splashClipAllInside) { drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; case splashModeRGB8: case splashModeBGR8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? 
xStep : 1; alphaAcc = 0; p = colorBuf + xSrc * 3; q = alphaBuf + xSrc; pixAcc0 = pixAcc1 = pixAcc2 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; alphaAcc += *q++; } p += 3 * (w - m); q += w - m; } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); alphaMul = pixMul * (1.0 / 255.0); alpha = (SplashCoord)alphaAcc * alphaMul; if (alpha > 0) { pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); pipe.shape = alpha; if (vectorAntialias && clipRes != splashClipAllInside) { drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; case splashModeXBGR8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; alphaAcc = 0; p = colorBuf + xSrc * 4; q = alphaBuf + xSrc; pixAcc0 = pixAcc1 = pixAcc2 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; *p++; alphaAcc += *q++; } p += 4 * (w - m); q += w - m; } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); alphaMul = pixMul * (1.0 / 255.0); alpha = (SplashCoord)alphaAcc * alphaMul; if (alpha > 0) { pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); pix[3] = 255; pipe.shape = alpha; if (vectorAntialias && clipRes != splashClipAllInside) { drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; #if SPLASH_CMYK case splashModeCMYK8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; alphaAcc = 0; p = colorBuf + xSrc * 4; q = alphaBuf + xSrc; pixAcc0 = pixAcc1 = pixAcc2 = pixAcc3 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; pixAcc3 += *p++; alphaAcc += *q++; } p += 4 * (w - m); q += w - m; } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); alphaMul = pixMul * (1.0 / 255.0); alpha = (SplashCoord)alphaAcc * alphaMul; if (alpha > 0) { pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); pix[3] = (int)((SplashCoord)pixAcc3 * pixMul); pipe.shape = alpha; if (vectorAntialias && clipRes != splashClipAllInside) { drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; #endif // SPLASH_CMYK } } } else { yt = 0; lastYStep = 1; for (y = 0; y < scaledHeight; ++y) { yStep = yp; yt += yq; if (yt >= scaledHeight) { yt -= scaledHeight; ++yStep; } n = (yp > 0) ? 
yStep : lastYStep; if (n > 0) { p = colorBuf; for (i = 0; i < n; ++i) { (*src)(srcData, p, NULL); p += w * nComps; } } lastYStep = yStep; k1 = splashRound(xShear * ySign * y); if (clipRes != splashClipAllInside && !rot && (int)(yShear * k1) == (int)(yShear * (xSign * (scaledWidth - 1) + k1))) { if (xSign > 0) { spanXMin = tx + k1; spanXMax = spanXMin + (scaledWidth - 1); } else { spanXMax = tx + k1; spanXMin = spanXMax - (scaledWidth - 1); } spanY = ty + ySign * y + (int)(yShear * k1); clipRes2 = state->clip->testSpan(spanXMin, spanXMax, spanY); if (clipRes2 == splashClipAllOutside) { continue; } } else { clipRes2 = clipRes; } xt = 0; xSrc = 0; x1 = k1; y1 = (SplashCoord)ySign * y + yShear * x1; if (yShear1 < 0) { y1 += 0.999; } n = yStep > 0 ? yStep : 1; switch (srcMode) { case splashModeMono1: case splashModeMono8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; p = colorBuf + xSrc; pixAcc0 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; } p += w - m; } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); if (vectorAntialias && clipRes != splashClipAllInside) { pipe.shape = (SplashCoord)1; drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; case splashModeRGB8: case splashModeBGR8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; p = colorBuf + xSrc * 3; pixAcc0 = pixAcc1 = pixAcc2 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; } p += 3 * (w - m); } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); if (vectorAntialias && clipRes != splashClipAllInside) { pipe.shape = (SplashCoord)1; drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; case splashModeXBGR8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; p = colorBuf + xSrc * 4; pixAcc0 = pixAcc1 = pixAcc2 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; *p++; } p += 4 * (w - m); } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); pix[3] = 255; if (vectorAntialias && clipRes != splashClipAllInside) { pipe.shape = (SplashCoord)1; drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; #if SPLASH_CMYK case splashModeCMYK8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? 
xStep : 1; p = colorBuf + xSrc * 4; pixAcc0 = pixAcc1 = pixAcc2 = pixAcc3 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; pixAcc3 += *p++; } p += 4 * (w - m); } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); pix[3] = (int)((SplashCoord)pixAcc3 * pixMul); if (vectorAntialias && clipRes != splashClipAllInside) { pipe.shape = (SplashCoord)1; drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; #endif // SPLASH_CMYK } } } gfree(colorBuf); gfree(alphaBuf); return splashOk; } Commit Message: CWE ID: CWE-189
SplashError Splash::drawImage(SplashImageSource src, void *srcData, SplashColorMode srcMode, GBool srcAlpha, int w, int h, SplashCoord *mat) { SplashPipe pipe; GBool ok, rot; SplashCoord xScale, yScale, xShear, yShear, yShear1; int tx, tx2, ty, ty2, scaledWidth, scaledHeight, xSign, ySign; int ulx, uly, llx, lly, urx, ury, lrx, lry; int ulx1, uly1, llx1, lly1, urx1, ury1, lrx1, lry1; int xMin, xMax, yMin, yMax; SplashClipResult clipRes, clipRes2; int yp, yq, yt, yStep, lastYStep; int xp, xq, xt, xStep, xSrc; int k1, spanXMin, spanXMax, spanY; SplashColorPtr colorBuf, p; SplashColor pix; Guchar *alphaBuf, *q; #if SPLASH_CMYK int pixAcc0, pixAcc1, pixAcc2, pixAcc3; #else int pixAcc0, pixAcc1, pixAcc2; #endif int alphaAcc; SplashCoord pixMul, alphaMul, alpha; int x, y, x1, x2, y2; SplashCoord y1; int nComps, n, m, i, j; if (debugMode) { printf("drawImage: srcMode=%d srcAlpha=%d w=%d h=%d mat=[%.2f %.2f %.2f %.2f %.2f %.2f]\n", srcMode, srcAlpha, w, h, (double)mat[0], (double)mat[1], (double)mat[2], (double)mat[3], (double)mat[4], (double)mat[5]); } ok = gFalse; // make gcc happy nComps = 0; // make gcc happy switch (bitmap->mode) { case splashModeMono1: case splashModeMono8: ok = srcMode == splashModeMono8; nComps = 1; break; case splashModeRGB8: ok = srcMode == splashModeRGB8; nComps = 3; break; case splashModeXBGR8: ok = srcMode == splashModeXBGR8; nComps = 4; break; case splashModeBGR8: ok = srcMode == splashModeBGR8; nComps = 3; break; #if SPLASH_CMYK case splashModeCMYK8: ok = srcMode == splashModeCMYK8; nComps = 4; break; #endif } if (!ok) { return splashErrModeMismatch; } if (splashAbs(mat[0] * mat[3] - mat[1] * mat[2]) < 0.000001) { return splashErrSingularMatrix; } rot = splashAbs(mat[1]) > splashAbs(mat[0]); if (rot) { xScale = -mat[1]; yScale = mat[2] - (mat[0] * mat[3]) / mat[1]; xShear = -mat[3] / yScale; yShear = -mat[0] / mat[1]; } else { xScale = mat[0]; yScale = mat[3] - (mat[1] * mat[2]) / mat[0]; xShear = mat[2] / yScale; yShear = mat[1] / mat[0]; } if (xScale >= 0) { tx = splashFloor(mat[4] - 0.01); tx2 = splashFloor(mat[4] + xScale + 0.01); } else { tx = splashFloor(mat[4] + 0.01); tx2 = splashFloor(mat[4] + xScale - 0.01); } scaledWidth = abs(tx2 - tx) + 1; if (yScale >= 0) { ty = splashFloor(mat[5] - 0.01); ty2 = splashFloor(mat[5] + yScale + 0.01); } else { ty = splashFloor(mat[5] + 0.01); ty2 = splashFloor(mat[5] + yScale - 0.01); } scaledHeight = abs(ty2 - ty) + 1; xSign = (xScale < 0) ? -1 : 1; ySign = (yScale < 0) ? -1 : 1; yShear1 = (SplashCoord)xSign * yShear; ulx1 = 0; uly1 = 0; urx1 = xSign * (scaledWidth - 1); ury1 = (int)(yShear * urx1); llx1 = splashRound(xShear * ySign * (scaledHeight - 1)); lly1 = ySign * (scaledHeight - 1) + (int)(yShear * llx1); lrx1 = xSign * (scaledWidth - 1) + splashRound(xShear * ySign * (scaledHeight - 1)); lry1 = ySign * (scaledHeight - 1) + (int)(yShear * lrx1); if (rot) { ulx = tx + uly1; uly = ty - ulx1; urx = tx + ury1; ury = ty - urx1; llx = tx + lly1; lly = ty - llx1; lrx = tx + lry1; lry = ty - lrx1; } else { ulx = tx + ulx1; uly = ty + uly1; urx = tx + urx1; ury = ty + ury1; llx = tx + llx1; lly = ty + lly1; lrx = tx + lrx1; lry = ty + lry1; } xMin = (ulx < urx) ? (ulx < llx) ? (ulx < lrx) ? ulx : lrx : (llx < lrx) ? llx : lrx : (urx < llx) ? (urx < lrx) ? urx : lrx : (llx < lrx) ? llx : lrx; xMax = (ulx > urx) ? (ulx > llx) ? (ulx > lrx) ? ulx : lrx : (llx > lrx) ? llx : lrx : (urx > llx) ? (urx > lrx) ? urx : lrx : (llx > lrx) ? llx : lrx; yMin = (uly < ury) ? (uly < lly) ? (uly < lry) ? uly : lry : (lly < lry) ? 
lly : lry : (ury < lly) ? (ury < lry) ? ury : lry : (lly < lry) ? lly : lry; yMax = (uly > ury) ? (uly > lly) ? (uly > lry) ? uly : lry : (lly > lry) ? lly : lry : (ury > lly) ? (ury > lry) ? ury : lry : (lly > lry) ? lly : lry; clipRes = state->clip->testRect(xMin, yMin, xMax, yMax); opClipRes = clipRes; if (clipRes == splashClipAllOutside) { return splashOk; } yp = h / scaledHeight; yq = h % scaledHeight; xp = w / scaledWidth; xq = w % scaledWidth; colorBuf = (SplashColorPtr)gmallocn3((yp + 1), w, nComps); if (srcAlpha) { alphaBuf = (Guchar *)gmallocn((yp + 1), w); } else { alphaBuf = NULL; } pixAcc0 = pixAcc1 = pixAcc2 = 0; // make gcc happy #if SPLASH_CMYK pixAcc3 = 0; // make gcc happy #endif pipeInit(&pipe, 0, 0, NULL, pix, state->fillAlpha, srcAlpha || (vectorAntialias && clipRes != splashClipAllInside), gFalse); if (vectorAntialias) { drawAAPixelInit(); } if (srcAlpha) { yt = 0; lastYStep = 1; for (y = 0; y < scaledHeight; ++y) { yStep = yp; yt += yq; if (yt >= scaledHeight) { yt -= scaledHeight; ++yStep; } n = (yp > 0) ? yStep : lastYStep; if (n > 0) { p = colorBuf; q = alphaBuf; for (i = 0; i < n; ++i) { (*src)(srcData, p, q); p += w * nComps; q += w; } } lastYStep = yStep; k1 = splashRound(xShear * ySign * y); if (clipRes != splashClipAllInside && !rot && (int)(yShear * k1) == (int)(yShear * (xSign * (scaledWidth - 1) + k1))) { if (xSign > 0) { spanXMin = tx + k1; spanXMax = spanXMin + (scaledWidth - 1); } else { spanXMax = tx + k1; spanXMin = spanXMax - (scaledWidth - 1); } spanY = ty + ySign * y + (int)(yShear * k1); clipRes2 = state->clip->testSpan(spanXMin, spanXMax, spanY); if (clipRes2 == splashClipAllOutside) { continue; } } else { clipRes2 = clipRes; } xt = 0; xSrc = 0; x1 = k1; y1 = (SplashCoord)ySign * y + yShear * x1; if (yShear1 < 0) { y1 += 0.999; } n = yStep > 0 ? yStep : 1; switch (srcMode) { case splashModeMono1: case splashModeMono8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; alphaAcc = 0; p = colorBuf + xSrc; q = alphaBuf + xSrc; pixAcc0 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; alphaAcc += *q++; } p += w - m; q += w - m; } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); alphaMul = pixMul * (1.0 / 255.0); alpha = (SplashCoord)alphaAcc * alphaMul; if (alpha > 0) { pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pipe.shape = alpha; if (vectorAntialias && clipRes != splashClipAllInside) { drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; case splashModeRGB8: case splashModeBGR8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? 
xStep : 1; alphaAcc = 0; p = colorBuf + xSrc * 3; q = alphaBuf + xSrc; pixAcc0 = pixAcc1 = pixAcc2 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; alphaAcc += *q++; } p += 3 * (w - m); q += w - m; } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); alphaMul = pixMul * (1.0 / 255.0); alpha = (SplashCoord)alphaAcc * alphaMul; if (alpha > 0) { pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); pipe.shape = alpha; if (vectorAntialias && clipRes != splashClipAllInside) { drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; case splashModeXBGR8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; alphaAcc = 0; p = colorBuf + xSrc * 4; q = alphaBuf + xSrc; pixAcc0 = pixAcc1 = pixAcc2 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; *p++; alphaAcc += *q++; } p += 4 * (w - m); q += w - m; } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); alphaMul = pixMul * (1.0 / 255.0); alpha = (SplashCoord)alphaAcc * alphaMul; if (alpha > 0) { pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); pix[3] = 255; pipe.shape = alpha; if (vectorAntialias && clipRes != splashClipAllInside) { drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; #if SPLASH_CMYK case splashModeCMYK8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; alphaAcc = 0; p = colorBuf + xSrc * 4; q = alphaBuf + xSrc; pixAcc0 = pixAcc1 = pixAcc2 = pixAcc3 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; pixAcc3 += *p++; alphaAcc += *q++; } p += 4 * (w - m); q += w - m; } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); alphaMul = pixMul * (1.0 / 255.0); alpha = (SplashCoord)alphaAcc * alphaMul; if (alpha > 0) { pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); pix[3] = (int)((SplashCoord)pixAcc3 * pixMul); pipe.shape = alpha; if (vectorAntialias && clipRes != splashClipAllInside) { drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; #endif // SPLASH_CMYK } } } else { yt = 0; lastYStep = 1; for (y = 0; y < scaledHeight; ++y) { yStep = yp; yt += yq; if (yt >= scaledHeight) { yt -= scaledHeight; ++yStep; } n = (yp > 0) ? 
yStep : lastYStep; if (n > 0) { p = colorBuf; for (i = 0; i < n; ++i) { (*src)(srcData, p, NULL); p += w * nComps; } } lastYStep = yStep; k1 = splashRound(xShear * ySign * y); if (clipRes != splashClipAllInside && !rot && (int)(yShear * k1) == (int)(yShear * (xSign * (scaledWidth - 1) + k1))) { if (xSign > 0) { spanXMin = tx + k1; spanXMax = spanXMin + (scaledWidth - 1); } else { spanXMax = tx + k1; spanXMin = spanXMax - (scaledWidth - 1); } spanY = ty + ySign * y + (int)(yShear * k1); clipRes2 = state->clip->testSpan(spanXMin, spanXMax, spanY); if (clipRes2 == splashClipAllOutside) { continue; } } else { clipRes2 = clipRes; } xt = 0; xSrc = 0; x1 = k1; y1 = (SplashCoord)ySign * y + yShear * x1; if (yShear1 < 0) { y1 += 0.999; } n = yStep > 0 ? yStep : 1; switch (srcMode) { case splashModeMono1: case splashModeMono8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; p = colorBuf + xSrc; pixAcc0 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; } p += w - m; } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); if (vectorAntialias && clipRes != splashClipAllInside) { pipe.shape = (SplashCoord)1; drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; case splashModeRGB8: case splashModeBGR8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; p = colorBuf + xSrc * 3; pixAcc0 = pixAcc1 = pixAcc2 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; } p += 3 * (w - m); } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); if (vectorAntialias && clipRes != splashClipAllInside) { pipe.shape = (SplashCoord)1; drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; case splashModeXBGR8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; p = colorBuf + xSrc * 4; pixAcc0 = pixAcc1 = pixAcc2 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; *p++; } p += 4 * (w - m); } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); pix[3] = 255; if (vectorAntialias && clipRes != splashClipAllInside) { pipe.shape = (SplashCoord)1; drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; #if SPLASH_CMYK case splashModeCMYK8: for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? 
xStep : 1; p = colorBuf + xSrc * 4; pixAcc0 = pixAcc1 = pixAcc2 = pixAcc3 = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc0 += *p++; pixAcc1 += *p++; pixAcc2 += *p++; pixAcc3 += *p++; } p += 4 * (w - m); } pixMul = (SplashCoord)1 / (SplashCoord)(n * m); pix[0] = (int)((SplashCoord)pixAcc0 * pixMul); pix[1] = (int)((SplashCoord)pixAcc1 * pixMul); pix[2] = (int)((SplashCoord)pixAcc2 * pixMul); pix[3] = (int)((SplashCoord)pixAcc3 * pixMul); if (vectorAntialias && clipRes != splashClipAllInside) { pipe.shape = (SplashCoord)1; drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } xSrc += xStep; x1 += xSign; y1 += yShear1; } break; #endif // SPLASH_CMYK } } } gfree(colorBuf); gfree(alphaBuf); return splashOk; }
164,618
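
Editor's note on the record above: the CWE-189 fix swaps the unchecked gmalloc((yp + 1) * w * nComps) and gmalloc((yp + 1) * w) allocations for poppler's gmallocn3()/gmallocn(), which fail instead of letting the size computation wrap. A minimal sketch of the same overflow-checked allocation idea, using a hypothetical checked_alloc3() helper rather than poppler's allocator:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-in for gmallocn3(): allocate n1 * n2 * n3 bytes, but
 * fail cleanly when the product would wrap around size_t instead of
 * returning an undersized buffer that later writes run past. */
static void *checked_alloc3(size_t n1, size_t n2, size_t n3)
{
    if (n2 != 0 && n1 > SIZE_MAX / n2) {
        errno = EOVERFLOW;
        return NULL;
    }
    size_t n12 = n1 * n2;
    if (n3 != 0 && n12 > SIZE_MAX / n3) {
        errno = EOVERFLOW;
        return NULL;
    }
    return malloc(n12 * n3);
}
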
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key; key_ref_t key_ref; long ret; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; goto error; } key = key_ref_to_ptr(key_ref); /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) goto error; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be * dangling off an instantiation key */ if (!is_key_possessed(key_ref)) { ret = -EACCES; goto error2; } /* the key is probably readable - now try to read it */ can_read_key: ret = key_validate(key); if (ret == 0) { ret = -EOPNOTSUPP; if (key->type->read) { /* read the data with the semaphore held (since we * might sleep) */ down_read(&key->sem); ret = key->type->read(key, buffer, buflen); up_read(&key->sem); } } error2: key_put(key); error: return ret; } Commit Message: KEYS: Fix race between read and revoke This fixes CVE-2015-7550. There's a race between keyctl_read() and keyctl_revoke(). If the revoke happens between keyctl_read() checking the validity of a key and the key's semaphore being taken, then the key type read method will see a revoked key. This causes a problem for the user-defined key type because it assumes in its read method that there will always be a payload in a non-revoked key and doesn't check for a NULL pointer. Fix this by making keyctl_read() check the validity of a key after taking semaphore instead of before. I think the bug was introduced with the original keyrings code. This was discovered by a multithreaded test program generated by syzkaller (http://github.com/google/syzkaller). Here's a cleaned up version: #include <sys/types.h> #include <keyutils.h> #include <pthread.h> void *thr0(void *arg) { key_serial_t key = (unsigned long)arg; keyctl_revoke(key); return 0; } void *thr1(void *arg) { key_serial_t key = (unsigned long)arg; char buffer[16]; keyctl_read(key, buffer, 16); return 0; } int main() { key_serial_t key = add_key("user", "%", "foo", 3, KEY_SPEC_USER_KEYRING); pthread_t th[5]; pthread_create(&th[0], 0, thr0, (void *)(unsigned long)key); pthread_create(&th[1], 0, thr1, (void *)(unsigned long)key); pthread_create(&th[2], 0, thr0, (void *)(unsigned long)key); pthread_create(&th[3], 0, thr1, (void *)(unsigned long)key); pthread_join(th[0], 0); pthread_join(th[1], 0); pthread_join(th[2], 0); pthread_join(th[3], 0); return 0; } Build as: cc -o keyctl-race keyctl-race.c -lkeyutils -lpthread Run as: while keyctl-race; do :; done as it may need several iterations to crash the kernel. The crash can be summarised as: BUG: unable to handle kernel NULL pointer dereference at 0000000000000010 IP: [<ffffffff81279b08>] user_read+0x56/0xa3 ... Call Trace: [<ffffffff81276aa9>] keyctl_read_key+0xb6/0xd7 [<ffffffff81277815>] SyS_keyctl+0x83/0xe0 [<ffffffff815dbb97>] entry_SYSCALL_64_fastpath+0x12/0x6f Reported-by: Dmitry Vyukov <[email protected]> Signed-off-by: David Howells <[email protected]> Tested-by: Dmitry Vyukov <[email protected]> Cc: [email protected] Signed-off-by: James Morris <[email protected]> CWE ID: CWE-362
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { struct key *key; key_ref_t key_ref; long ret; /* find the key first */ key_ref = lookup_user_key(keyid, 0, 0); if (IS_ERR(key_ref)) { ret = -ENOKEY; goto error; } key = key_ref_to_ptr(key_ref); /* see if we can read it directly */ ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) goto error; /* we can't; see if it's searchable from this process's keyrings * - we automatically take account of the fact that it may be * dangling off an instantiation key */ if (!is_key_possessed(key_ref)) { ret = -EACCES; goto error2; } /* the key is probably readable - now try to read it */ can_read_key: ret = -EOPNOTSUPP; if (key->type->read) { /* Read the data with the semaphore held (since we might sleep) * to protect against the key being updated or revoked. */ down_read(&key->sem); ret = key_validate(key); if (ret == 0) ret = key->type->read(key, buffer, buflen); up_read(&key->sem); } error2: key_put(key); error: return ret; }
167,558
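
Editor's note on the record above: the fixed keyctl_read_key() moves key_validate() inside down_read(&key->sem), so a concurrent revoke can no longer slip in between the validity check and the read. A minimal user-space sketch of that ordering, with a hypothetical obj type standing in for the key; the only point is that the check happens under the same lock the revoker takes:

#include <errno.h>
#include <pthread.h>
#include <string.h>
#include <sys/types.h>

/* Toy object: 'revoked' plays the role of the key's revocation state and
 * 'payload' the data a concurrent revoke would tear down. */
struct obj {
    pthread_rwlock_t lock;
    int revoked;
    size_t len;
    char payload[64];
};

/* Reader following the fixed ordering: take the lock first, re-check
 * validity while holding it, and only then touch the payload.  Checking
 * 'revoked' before taking the lock would reopen the read/revoke race. */
static ssize_t obj_read(struct obj *o, char *buf, size_t buflen)
{
    ssize_t ret;

    pthread_rwlock_rdlock(&o->lock);
    if (o->revoked) {
        ret = -EACCES;
    } else {
        size_t n = o->len < buflen ? o->len : buflen;
        memcpy(buf, o->payload, n);
        ret = (ssize_t)n;
    }
    pthread_rwlock_unlock(&o->lock);
    return ret;
}
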
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: std::unique_ptr<net::test_server::HttpResponse> GetConfigResponse( const net::test_server::HttpRequest& request) { auto response = std::make_unique<net::test_server::BasicHttpResponse>(); response->set_content(config_.SerializeAsString()); response->set_content_type("text/plain"); if (config_run_loop_) config_run_loop_->Quit(); return response; } Commit Message: Disable all DRP URL fetches when holdback is enabled Disable secure proxy checker, warmup url fetcher and client config fetch when the client is in DRP (Data Reduction Proxy) holdback. This CL does not disable pingbacks when client is in the holdback, but the pingback code is going away soon. Change-Id: Icbb59d814d1452123869c609e0770d1439c1db51 Bug: 984964 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1709965 Commit-Queue: Tarun Bansal <[email protected]> Reviewed-by: Robert Ogden <[email protected]> Cr-Commit-Position: refs/heads/master@{#679649} CWE ID: CWE-416
std::unique_ptr<net::test_server::HttpResponse> GetConfigResponse( const net::test_server::HttpRequest& request) { // Config should not be fetched when in holdback. EXPECT_FALSE( data_reduction_proxy::params::IsIncludedInHoldbackFieldTrial()); auto response = std::make_unique<net::test_server::BasicHttpResponse>(); response->set_content(config_.SerializeAsString()); response->set_content_type("text/plain"); if (config_run_loop_) config_run_loop_->Quit(); return response; }
172,414
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int uhid_write(int fd, const struct uhid_event *ev) { ssize_t ret = write(fd, ev, sizeof(*ev)); if (ret < 0){ int rtn = -errno; APPL_TRACE_ERROR("%s: Cannot write to uhid:%s", __FUNCTION__, strerror(errno)); return rtn; } else if (ret != (ssize_t)sizeof(*ev)) { APPL_TRACE_ERROR("%s: Wrong size written to uhid: %zd != %zu", __FUNCTION__, ret, sizeof(*ev)); return -EFAULT; } return 0; } Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release CWE ID: CWE-284
static int uhid_write(int fd, const struct uhid_event *ev) { ssize_t ret = TEMP_FAILURE_RETRY(write(fd, ev, sizeof(*ev))); if (ret < 0){ int rtn = -errno; APPL_TRACE_ERROR("%s: Cannot write to uhid:%s", __FUNCTION__, strerror(errno)); return rtn; } else if (ret != (ssize_t)sizeof(*ev)) { APPL_TRACE_ERROR("%s: Wrong size written to uhid: %zd != %zu", __FUNCTION__, ret, sizeof(*ev)); return -EFAULT; } return 0; }
173,433
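
Editor's note on the record above: the fix wraps write() in TEMP_FAILURE_RETRY() so a signal delivered to the Bluetooth process can no longer make the uhid write fail with EINTR and drop the event. A small sketch of the idiom that macro expands to in glibc/bionic, for callers that do not have it available:

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/* Repeat the syscall as long as it is interrupted by a signal; success or
 * any other failure is returned to the caller unchanged. */
static ssize_t write_retry(int fd, const void *buf, size_t len)
{
    ssize_t ret;
    do {
        ret = write(fd, buf, len);
    } while (ret < 0 && errno == EINTR);
    return ret;
}
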
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs; int off, i, slot, spi; if (regs[regno].type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ if (zero_size_allowed && access_size == 0 && register_is_null(regs[regno])) return 0; verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[regs[regno].type], reg_type_str[PTR_TO_STACK]); return -EACCES; } /* Only allow fixed-offset stack reads */ if (!tnum_is_const(regs[regno].var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off); verbose(env, "invalid variable stack read R%d var_off=%s\n", regno, tn_buf); } off = regs[regno].off + regs[regno].var_off.value; if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || access_size < 0 || (access_size == 0 && !zero_size_allowed)) { verbose(env, "invalid stack type R%d off=%d access_size=%d\n", regno, off, access_size); return -EACCES; } if (env->prog->aux->stack_depth < -off) env->prog->aux->stack_depth = -off; if (meta && meta->raw_mode) { meta->access_size = access_size; meta->regno = regno; return 0; } for (i = 0; i < access_size; i++) { slot = -(off + i) - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot || state->stack[spi].slot_type[slot % BPF_REG_SIZE] != STACK_MISC) { verbose(env, "invalid indirect read from stack off %d+%d size %d\n", off, i, access_size); return -EACCES; } } return 0; } Commit Message: bpf: fix missing error return in check_stack_boundary() Prevent indirect stack accesses at non-constant addresses, which would permit reading and corrupting spilled pointers. Fixes: f1174f77b50c ("bpf/verifier: rework value tracking") Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> CWE ID: CWE-119
static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs; int off, i, slot, spi; if (regs[regno].type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ if (zero_size_allowed && access_size == 0 && register_is_null(regs[regno])) return 0; verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[regs[regno].type], reg_type_str[PTR_TO_STACK]); return -EACCES; } /* Only allow fixed-offset stack reads */ if (!tnum_is_const(regs[regno].var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off); verbose(env, "invalid variable stack read R%d var_off=%s\n", regno, tn_buf); return -EACCES; } off = regs[regno].off + regs[regno].var_off.value; if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || access_size < 0 || (access_size == 0 && !zero_size_allowed)) { verbose(env, "invalid stack type R%d off=%d access_size=%d\n", regno, off, access_size); return -EACCES; } if (env->prog->aux->stack_depth < -off) env->prog->aux->stack_depth = -off; if (meta && meta->raw_mode) { meta->access_size = access_size; meta->regno = regno; return 0; } for (i = 0; i < access_size; i++) { slot = -(off + i) - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot || state->stack[spi].slot_type[slot % BPF_REG_SIZE] != STACK_MISC) { verbose(env, "invalid indirect read from stack off %d+%d size %d\n", off, i, access_size); return -EACCES; } } return 0; }
167,640
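
Editor's note on the record above: the only change in the fixed verifier code is the return -EACCES; added after the "invalid variable stack read" message — a rejected condition must abort the operation, not just log it and fall through. A trivial sketch of that check-report-return shape, with hypothetical names:

#include <errno.h>
#include <stdio.h>

/* Reject non-constant offsets outright.  The bug was reporting the problem
 * and then continuing as if the check had passed. */
static int require_constant_offset(int offset_is_constant)
{
    if (!offset_is_constant) {
        fprintf(stderr, "invalid variable stack read\n");
        return -EACCES;   /* the return the original code was missing */
    }
    return 0;
}
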
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void registerRewriteURL(const char* fromURL, const char* toURL) { m_rewriteURLs.add(fromURL, toURL); } Commit Message: Revert 162155 "This review merges the two existing page serializ..." Change r162155 broke the world even though it was landed using the CQ. > This review merges the two existing page serializers, WebPageSerializerImpl and > PageSerializer, into one, PageSerializer. In addition to this it moves all > the old tests from WebPageNewSerializerTest and WebPageSerializerTest to the > PageSerializerTest structure and splits out one test for MHTML into a new > MHTMLTest file. > > Saving as 'Webpage, Complete', 'Webpage, HTML Only' and as MHTML when the > 'Save Page as MHTML' flag is enabled now uses the same code, and should thus > have the same feature set. Meaning that both modes now should be a bit better. > > Detailed list of changes: > > - PageSerializerTest: Prepare for more DTD test > - PageSerializerTest: Remove now unneccesary input image test > - PageSerializerTest: Remove unused WebPageSerializer/Impl code > - PageSerializerTest: Move data URI morph test > - PageSerializerTest: Move data URI test > - PageSerializerTest: Move namespace test > - PageSerializerTest: Move SVG Image test > - MHTMLTest: Move MHTML specific test to own test file > - PageSerializerTest: Delete duplicate XML header test > - PageSerializerTest: Move blank frame test > - PageSerializerTest: Move CSS test > - PageSerializerTest: Add frameset/frame test > - PageSerializerTest: Move old iframe test > - PageSerializerTest: Move old elements test > - Use PageSerizer for saving web pages > - PageSerializerTest: Test for rewriting links > - PageSerializer: Add rewrite link accumulator > - PageSerializer: Serialize images in iframes/frames src > - PageSerializer: XHTML fix for meta tags > - PageSerializer: Add presentation CSS > - PageSerializer: Rename out parameter > > BUG= > [email protected] > > Review URL: https://codereview.chromium.org/68613003 [email protected] Review URL: https://codereview.chromium.org/73673003 git-svn-id: svn://svn.chromium.org/blink/trunk@162156 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
void registerRewriteURL(const char* fromURL, const char* toURL)
171,573
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification) { int ret; struct fd f; struct sock *sock; struct inode *inode; struct mqueue_inode_info *info; struct sk_buff *nc; audit_mq_notify(mqdes, notification); nc = NULL; sock = NULL; if (notification != NULL) { if (unlikely(notification->sigev_notify != SIGEV_NONE && notification->sigev_notify != SIGEV_SIGNAL && notification->sigev_notify != SIGEV_THREAD)) return -EINVAL; if (notification->sigev_notify == SIGEV_SIGNAL && !valid_signal(notification->sigev_signo)) { return -EINVAL; } if (notification->sigev_notify == SIGEV_THREAD) { long timeo; /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); if (!nc) { ret = -ENOMEM; goto out; } if (copy_from_user(nc->data, notification->sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { ret = -EFAULT; goto out; } /* TODO: add a header? */ skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: f = fdget(notification->sigev_signo); if (!f.file) { ret = -EBADF; goto out; } sock = netlink_getsockbyfilp(f.file); fdput(f); if (IS_ERR(sock)) { ret = PTR_ERR(sock); sock = NULL; goto out; } timeo = MAX_SCHEDULE_TIMEOUT; ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) goto retry; if (ret) { sock = NULL; nc = NULL; goto out; } } } f = fdget(mqdes); if (!f.file) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); ret = 0; spin_lock(&info->lock); if (notification == NULL) { if (info->notify_owner == task_tgid(current)) { remove_notification(info); inode->i_atime = inode->i_ctime = current_time(inode); } } else if (info->notify_owner != NULL) { ret = -EBUSY; } else { switch (notification->sigev_notify) { case SIGEV_NONE: info->notify.sigev_notify = SIGEV_NONE; break; case SIGEV_THREAD: info->notify_sock = sock; info->notify_cookie = nc; sock = NULL; nc = NULL; info->notify.sigev_notify = SIGEV_THREAD; break; case SIGEV_SIGNAL: info->notify.sigev_signo = notification->sigev_signo; info->notify.sigev_value = notification->sigev_value; info->notify.sigev_notify = SIGEV_SIGNAL; break; } info->notify_owner = get_pid(task_tgid(current)); info->notify_user_ns = get_user_ns(current_user_ns()); inode->i_atime = inode->i_ctime = current_time(inode); } spin_unlock(&info->lock); out_fput: fdput(f); out: if (sock) netlink_detachskb(sock, nc); else if (nc) dev_kfree_skb(nc); return ret; } Commit Message: mqueue: fix a use-after-free in sys_mq_notify() The retry logic for netlink_attachskb() inside sys_mq_notify() is nasty and vulnerable: 1) The sock refcnt is already released when retry is needed 2) The fd is controllable by user-space because we already release the file refcnt so we when retry but the fd has been just closed by user-space during this small window, we end up calling netlink_detachskb() on the error path which releases the sock again, later when the user-space closes this socket a use-after-free could be triggered. Setting 'sock' to NULL here should be sufficient to fix it. Reported-by: GeneBlue <[email protected]> Signed-off-by: Cong Wang <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Manfred Spraul <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-416
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification) { int ret; struct fd f; struct sock *sock; struct inode *inode; struct mqueue_inode_info *info; struct sk_buff *nc; audit_mq_notify(mqdes, notification); nc = NULL; sock = NULL; if (notification != NULL) { if (unlikely(notification->sigev_notify != SIGEV_NONE && notification->sigev_notify != SIGEV_SIGNAL && notification->sigev_notify != SIGEV_THREAD)) return -EINVAL; if (notification->sigev_notify == SIGEV_SIGNAL && !valid_signal(notification->sigev_signo)) { return -EINVAL; } if (notification->sigev_notify == SIGEV_THREAD) { long timeo; /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); if (!nc) { ret = -ENOMEM; goto out; } if (copy_from_user(nc->data, notification->sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { ret = -EFAULT; goto out; } /* TODO: add a header? */ skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: f = fdget(notification->sigev_signo); if (!f.file) { ret = -EBADF; goto out; } sock = netlink_getsockbyfilp(f.file); fdput(f); if (IS_ERR(sock)) { ret = PTR_ERR(sock); sock = NULL; goto out; } timeo = MAX_SCHEDULE_TIMEOUT; ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) { sock = NULL; goto retry; } if (ret) { sock = NULL; nc = NULL; goto out; } } } f = fdget(mqdes); if (!f.file) { ret = -EBADF; goto out; } inode = file_inode(f.file); if (unlikely(f.file->f_op != &mqueue_file_operations)) { ret = -EBADF; goto out_fput; } info = MQUEUE_I(inode); ret = 0; spin_lock(&info->lock); if (notification == NULL) { if (info->notify_owner == task_tgid(current)) { remove_notification(info); inode->i_atime = inode->i_ctime = current_time(inode); } } else if (info->notify_owner != NULL) { ret = -EBUSY; } else { switch (notification->sigev_notify) { case SIGEV_NONE: info->notify.sigev_notify = SIGEV_NONE; break; case SIGEV_THREAD: info->notify_sock = sock; info->notify_cookie = nc; sock = NULL; nc = NULL; info->notify.sigev_notify = SIGEV_THREAD; break; case SIGEV_SIGNAL: info->notify.sigev_signo = notification->sigev_signo; info->notify.sigev_value = notification->sigev_value; info->notify.sigev_notify = SIGEV_SIGNAL; break; } info->notify_owner = get_pid(task_tgid(current)); info->notify_user_ns = get_user_ns(current_user_ns()); inode->i_atime = inode->i_ctime = current_time(inode); } spin_unlock(&info->lock); out_fput: fdput(f); out: if (sock) netlink_detachskb(sock, nc); else if (nc) dev_kfree_skb(nc); return ret; }
168,047
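
Editor's note on the record above: in the fixed do_mq_notify(), sock = NULL; is added before goto retry; because netlink_attachskb() has already dropped the reference when it asks for a retry; without that, a failure while reacquiring the fd sends the stale pointer into the shared cleanup path and releases it a second time. A compact sketch of the ownership rule, with toy acquire/attach/res_put stand-ins in place of the kernel primitives:

#include <errno.h>
#include <stdlib.h>

struct res { int refs; };                  /* stands in for the netlink sock */

static struct res *acquire(int fd)         /* stands in for fdget + getsock */
{
    (void)fd;
    struct res *r = calloc(1, sizeof(*r));
    if (r)
        r->refs = 1;
    return r;
}

static void res_put(struct res *r)
{
    if (r && --r->refs == 0)
        free(r);
}

/* Stand-in for netlink_attachskb(): 0 = attached, caller keeps its reference;
 * 1 = caller must retry, reference already dropped; <0 = error, dropped.
 * This stub only ever succeeds; the caller below still handles every case. */
static int attach(struct res *r)
{
    (void)r;
    return 0;
}

static int use(int fd)
{
    struct res *r;
    int ret;

retry:
    r = acquire(fd);
    if (!r)
        return -EBADF;
    ret = attach(r);
    if (ret == 1) {
        r = NULL;        /* the fix: forget the reference attach() consumed */
        goto retry;
    }
    if (ret < 0)
        r = NULL;        /* likewise on error: nothing left to release */
    if (r)
        res_put(r);      /* only drop a reference we still own */
    return ret;
}
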
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int test_sqr(BIO *bp, BN_CTX *ctx) { BIGNUM a,c,d,e; int i; BN_init(&a); BN_init(&c); BN_init(&d); BN_init(&e); for (i=0; i<num0; i++) { BN_bntest_rand(&a,40+i*10,0,0); a.neg=rand_neg(); BN_sqr(&c,&a,ctx); if (bp != NULL) { if (!results) { BN_print(bp,&a); BIO_puts(bp," * "); BN_print(bp,&a); BIO_puts(bp," - "); } BN_print(bp,&c); BIO_puts(bp,"\n"); } BN_div(&d,&e,&c,&a,ctx); BN_sub(&d,&d,&a); if(!BN_is_zero(&d) || !BN_is_zero(&e)) { fprintf(stderr,"Square test failed!\n"); return 0; } } BN_free(&a); BN_free(&c); BN_free(&d); BN_free(&e); return(1); } Commit Message: Fix for CVE-2014-3570 (with minor bn_asm.c revamp). Reviewed-by: Emilia Kasper <[email protected]> CWE ID: CWE-310
int test_sqr(BIO *bp, BN_CTX *ctx) { BIGNUM *a,*c,*d,*e; int i, ret = 0; a = BN_new(); c = BN_new(); d = BN_new(); e = BN_new(); if (a == NULL || c == NULL || d == NULL || e == NULL) { goto err; } for (i=0; i<num0; i++) { BN_bntest_rand(a,40+i*10,0,0); a->neg=rand_neg(); BN_sqr(c,a,ctx); if (bp != NULL) { if (!results) { BN_print(bp,a); BIO_puts(bp," * "); BN_print(bp,a); BIO_puts(bp," - "); } BN_print(bp,c); BIO_puts(bp,"\n"); } BN_div(d,e,c,a,ctx); BN_sub(d,d,a); if(!BN_is_zero(d) || !BN_is_zero(e)) { fprintf(stderr,"Square test failed!\n"); goto err; } } /* Regression test for a BN_sqr overflow bug. */ BN_hex2bn(&a, "80000000000000008000000000000001FFFFFFFFFFFFFFFE0000000000000000"); BN_sqr(c, a, ctx); if (bp != NULL) { if (!results) { BN_print(bp,a); BIO_puts(bp," * "); BN_print(bp,a); BIO_puts(bp," - "); } BN_print(bp,c); BIO_puts(bp,"\n"); } BN_mul(d, a, a, ctx); if (BN_cmp(c, d)) { fprintf(stderr, "Square test failed: BN_sqr and BN_mul produce " "different results!\n"); goto err; } /* Regression test for a BN_sqr overflow bug. */ BN_hex2bn(&a, "80000000000000000000000080000001FFFFFFFE000000000000000000000000"); BN_sqr(c, a, ctx); if (bp != NULL) { if (!results) { BN_print(bp,a); BIO_puts(bp," * "); BN_print(bp,a); BIO_puts(bp," - "); } BN_print(bp,c); BIO_puts(bp,"\n"); } BN_mul(d, a, a, ctx); if (BN_cmp(c, d)) { fprintf(stderr, "Square test failed: BN_sqr and BN_mul produce " "different results!\n"); goto err; } ret = 1; err: if (a != NULL) BN_free(a); if (c != NULL) BN_free(c); if (d != NULL) BN_free(d); if (e != NULL) BN_free(e); return ret; }
166,832
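
Editor's note on the record above: beyond switching the test to heap-allocated BIGNUMs, the fix adds regression inputs on which the specialized BN_sqr() path had disagreed with BN_mul(), and checks the two against each other. That differential-testing shape is easy to reuse; a minimal sketch with hypothetical sqr_fast()/mul_ref() functions:

#include <stdint.h>
#include <stdio.h>

static uint64_t mul_ref(uint32_t a)  { return (uint64_t)a * a; }  /* reference path */
static uint64_t sqr_fast(uint32_t a) { return (uint64_t)a * a; }  /* path under test */

/* Feed the same input to the optimized path and the reference path and fail
 * loudly on any disagreement -- the same idea as comparing BN_sqr against
 * BN_mul on the added regression inputs. */
static int check_square(uint32_t a)
{
    uint64_t fast = sqr_fast(a), ref = mul_ref(a);
    if (fast != ref) {
        fprintf(stderr, "square mismatch for %u: %llu != %llu\n",
                a, (unsigned long long)fast, (unsigned long long)ref);
        return 0;
    }
    return 1;
}
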
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: krb5_init_creds_step(krb5_context context, krb5_init_creds_context ctx, krb5_data *in, krb5_data *out, krb5_krbhst_info *hostinfo, unsigned int *flags) { krb5_error_code ret; size_t len = 0; size_t size; AS_REQ req2; krb5_data_zero(out); if (ctx->as_req.req_body.cname == NULL) { ret = init_as_req(context, ctx->flags, &ctx->cred, ctx->addrs, ctx->etypes, &ctx->as_req); if (ret) { free_init_creds_ctx(context, ctx); return ret; } } #define MAX_PA_COUNTER 10 if (ctx->pa_counter > MAX_PA_COUNTER) { krb5_set_error_message(context, KRB5_GET_IN_TKT_LOOP, N_("Looping %d times while getting " "initial credentials", ""), ctx->pa_counter); return KRB5_GET_IN_TKT_LOOP; } ctx->pa_counter++; _krb5_debug(context, 5, "krb5_get_init_creds: loop %d", ctx->pa_counter); /* Lets process the input packet */ if (in && in->length) { krb5_kdc_rep rep; memset(&rep, 0, sizeof(rep)); _krb5_debug(context, 5, "krb5_get_init_creds: processing input"); ret = decode_AS_REP(in->data, in->length, &rep.kdc_rep, &size); if (ret == 0) { unsigned eflags = EXTRACT_TICKET_AS_REQ | EXTRACT_TICKET_TIMESYNC; krb5_data data; /* * Unwrap AS-REP */ ASN1_MALLOC_ENCODE(Ticket, data.data, data.length, &rep.kdc_rep.ticket, &size, ret); if (ret) goto out; heim_assert(data.length == size, "ASN.1 internal error"); ret = fast_unwrap_as_rep(context, ctx->nonce, &data, &ctx->fast_state, &rep.kdc_rep); krb5_data_free(&data); if (ret) goto out; /* * Now check and extract the ticket */ if (ctx->flags.canonicalize) { eflags |= EXTRACT_TICKET_ALLOW_SERVER_MISMATCH; eflags |= EXTRACT_TICKET_MATCH_REALM; } if (ctx->ic_flags & KRB5_INIT_CREDS_NO_C_CANON_CHECK) eflags |= EXTRACT_TICKET_ALLOW_CNAME_MISMATCH; ret = process_pa_data_to_key(context, ctx, &ctx->cred, &ctx->as_req, &rep.kdc_rep, hostinfo, &ctx->fast_state.reply_key); if (ret) { free_AS_REP(&rep.kdc_rep); goto out; } _krb5_debug(context, 5, "krb5_get_init_creds: extracting ticket"); ret = _krb5_extract_ticket(context, &rep, &ctx->cred, ctx->fast_state.reply_key, NULL, KRB5_KU_AS_REP_ENC_PART, NULL, ctx->nonce, eflags, &ctx->req_buffer, NULL, NULL); if (ret == 0) ret = copy_EncKDCRepPart(&rep.enc_part, &ctx->enc_part); krb5_free_keyblock(context, ctx->fast_state.reply_key); ctx->fast_state.reply_key = NULL; *flags = 0; free_AS_REP(&rep.kdc_rep); free_EncASRepPart(&rep.enc_part); return ret; } else { /* let's try to parse it as a KRB-ERROR */ _krb5_debug(context, 5, "krb5_get_init_creds: got an error"); free_KRB_ERROR(&ctx->error); ret = krb5_rd_error(context, in, &ctx->error); if(ret && in->length && ((char*)in->data)[0] == 4) ret = KRB5KRB_AP_ERR_V4_REPLY; if (ret) { _krb5_debug(context, 5, "krb5_get_init_creds: failed to read error"); goto out; } /* * Unwrap KRB-ERROR */ ret = fast_unwrap_error(context, &ctx->fast_state, &ctx->error); if (ret) goto out; /* * */ ret = krb5_error_from_rd_error(context, &ctx->error, &ctx->cred); _krb5_debug(context, 5, "krb5_get_init_creds: KRB-ERROR %d", ret); /* * If no preauth was set and KDC requires it, give it one * more try. 
*/ if (ret == KRB5KDC_ERR_PREAUTH_REQUIRED) { free_METHOD_DATA(&ctx->md); memset(&ctx->md, 0, sizeof(ctx->md)); if (ctx->error.e_data) { ret = decode_METHOD_DATA(ctx->error.e_data->data, ctx->error.e_data->length, &ctx->md, NULL); if (ret) krb5_set_error_message(context, ret, N_("Failed to decode METHOD-DATA", "")); } else { krb5_set_error_message(context, ret, N_("Preauth required but no preauth " "options send by KDC", "")); } } else if (ret == KRB5KRB_AP_ERR_SKEW && context->kdc_sec_offset == 0) { /* * Try adapt to timeskrew when we are using pre-auth, and * if there was a time skew, try again. */ krb5_set_real_time(context, ctx->error.stime, -1); if (context->kdc_sec_offset) ret = 0; _krb5_debug(context, 10, "init_creds: err skew updateing kdc offset to %d", context->kdc_sec_offset); ctx->used_pa_types = 0; } else if (ret == KRB5_KDC_ERR_WRONG_REALM && ctx->flags.canonicalize) { /* client referal to a new realm */ if (ctx->error.crealm == NULL) { krb5_set_error_message(context, ret, N_("Got a client referral, not but no realm", "")); goto out; } _krb5_debug(context, 5, "krb5_get_init_creds: got referal to realm %s", *ctx->error.crealm); ret = krb5_principal_set_realm(context, ctx->cred.client, *ctx->error.crealm); if (ret) goto out; if (krb5_principal_is_krbtgt(context, ctx->cred.server)) { ret = krb5_init_creds_set_service(context, ctx, NULL); if (ret) goto out; } free_AS_REQ(&ctx->as_req); memset(&ctx->as_req, 0, sizeof(ctx->as_req)); ctx->used_pa_types = 0; } else if (ret == KRB5KDC_ERR_KEY_EXP && ctx->runflags.change_password == 0 && ctx->prompter) { char buf2[1024]; ctx->runflags.change_password = 1; ctx->prompter(context, ctx->prompter_data, NULL, N_("Password has expired", ""), 0, NULL); /* try to avoid recursion */ if (ctx->in_tkt_service != NULL && strcmp(ctx->in_tkt_service, "kadmin/changepw") == 0) goto out; /* don't try to change password where then where none */ if (ctx->prompter == NULL) goto out; ret = change_password(context, ctx->cred.client, ctx->password, buf2, sizeof(buf2), ctx->prompter, ctx->prompter_data, NULL); if (ret) goto out; krb5_init_creds_set_password(context, ctx, buf2); ctx->used_pa_types = 0; ret = 0; } else if (ret == KRB5KDC_ERR_PREAUTH_FAILED) { if (ctx->fast_state.flags & KRB5_FAST_DISABLED) goto out; if (ctx->fast_state.flags & (KRB5_FAST_REQUIRED | KRB5_FAST_EXPECTED)) goto out; _krb5_debug(context, 10, "preauth failed with FAST, " "and told by KD or user, trying w/o FAST"); ctx->fast_state.flags |= KRB5_FAST_DISABLED; ctx->used_pa_types = 0; ret = 0; } if (ret) goto out; } } if (ctx->as_req.req_body.cname == NULL) { ret = init_as_req(context, ctx->flags, &ctx->cred, ctx->addrs, ctx->etypes, &ctx->as_req); if (ret) { free_init_creds_ctx(context, ctx); return ret; } } if (ctx->as_req.padata) { free_METHOD_DATA(ctx->as_req.padata); free(ctx->as_req.padata); ctx->as_req.padata = NULL; } /* Set a new nonce. 
*/ ctx->as_req.req_body.nonce = ctx->nonce; /* fill_in_md_data */ ret = process_pa_data_to_md(context, &ctx->cred, &ctx->as_req, ctx, &ctx->md, &ctx->as_req.padata, ctx->prompter, ctx->prompter_data); if (ret) goto out; /* * Wrap with FAST */ copy_AS_REQ(&ctx->as_req, &req2); ret = fast_wrap_req(context, &ctx->fast_state, &req2); if (ret) { free_AS_REQ(&req2); goto out; } krb5_data_free(&ctx->req_buffer); ASN1_MALLOC_ENCODE(AS_REQ, ctx->req_buffer.data, ctx->req_buffer.length, &req2, &len, ret); free_AS_REQ(&req2); if (ret) goto out; if(len != ctx->req_buffer.length) krb5_abortx(context, "internal error in ASN.1 encoder"); out->data = ctx->req_buffer.data; out->length = ctx->req_buffer.length; *flags = KRB5_INIT_CREDS_STEP_FLAG_CONTINUE; return 0; out: return ret; } Commit Message: CVE-2019-12098: krb5: always confirm PA-PKINIT-KX for anon PKINIT RFC8062 Section 7 requires verification of the PA-PKINIT-KX key excahnge when anonymous PKINIT is used. Failure to do so can permit an active attacker to become a man-in-the-middle. Introduced by a1ef548600c5bb51cf52a9a9ea12676506ede19f. First tagged release Heimdal 1.4.0. CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N (4.8) Change-Id: I6cc1c0c24985936468af08693839ac6c3edda133 Signed-off-by: Jeffrey Altman <[email protected]> Approved-by: Jeffrey Altman <[email protected]> (cherry picked from commit 38c797e1ae9b9c8f99ae4aa2e73957679031fd2b) CWE ID: CWE-320
krb5_init_creds_step(krb5_context context, krb5_init_creds_context ctx, krb5_data *in, krb5_data *out, krb5_krbhst_info *hostinfo, unsigned int *flags) { krb5_error_code ret; size_t len = 0; size_t size; AS_REQ req2; krb5_data_zero(out); if (ctx->as_req.req_body.cname == NULL) { ret = init_as_req(context, ctx->flags, &ctx->cred, ctx->addrs, ctx->etypes, &ctx->as_req); if (ret) { free_init_creds_ctx(context, ctx); return ret; } } #define MAX_PA_COUNTER 10 if (ctx->pa_counter > MAX_PA_COUNTER) { krb5_set_error_message(context, KRB5_GET_IN_TKT_LOOP, N_("Looping %d times while getting " "initial credentials", ""), ctx->pa_counter); return KRB5_GET_IN_TKT_LOOP; } ctx->pa_counter++; _krb5_debug(context, 5, "krb5_get_init_creds: loop %d", ctx->pa_counter); /* Lets process the input packet */ if (in && in->length) { krb5_kdc_rep rep; memset(&rep, 0, sizeof(rep)); _krb5_debug(context, 5, "krb5_get_init_creds: processing input"); ret = decode_AS_REP(in->data, in->length, &rep.kdc_rep, &size); if (ret == 0) { unsigned eflags = EXTRACT_TICKET_AS_REQ | EXTRACT_TICKET_TIMESYNC; krb5_data data; /* * Unwrap AS-REP */ ASN1_MALLOC_ENCODE(Ticket, data.data, data.length, &rep.kdc_rep.ticket, &size, ret); if (ret) goto out; heim_assert(data.length == size, "ASN.1 internal error"); ret = fast_unwrap_as_rep(context, ctx->nonce, &data, &ctx->fast_state, &rep.kdc_rep); krb5_data_free(&data); if (ret) goto out; /* * Now check and extract the ticket */ if (ctx->flags.canonicalize) { eflags |= EXTRACT_TICKET_ALLOW_SERVER_MISMATCH; eflags |= EXTRACT_TICKET_MATCH_REALM; } if (ctx->ic_flags & KRB5_INIT_CREDS_NO_C_CANON_CHECK) eflags |= EXTRACT_TICKET_ALLOW_CNAME_MISMATCH; ret = process_pa_data_to_key(context, ctx, &ctx->cred, &ctx->as_req, &rep.kdc_rep, hostinfo, &ctx->fast_state.reply_key); if (ret) { free_AS_REP(&rep.kdc_rep); goto out; } _krb5_debug(context, 5, "krb5_get_init_creds: extracting ticket"); ret = _krb5_extract_ticket(context, &rep, &ctx->cred, ctx->fast_state.reply_key, NULL, KRB5_KU_AS_REP_ENC_PART, NULL, ctx->nonce, eflags, &ctx->req_buffer, NULL, NULL); if (ret == 0 && ctx->pk_init_ctx) { PA_DATA *pa_pkinit_kx; int idx = 0; pa_pkinit_kx = krb5_find_padata(rep.kdc_rep.padata->val, rep.kdc_rep.padata->len, KRB5_PADATA_PKINIT_KX, &idx); ret = _krb5_pk_kx_confirm(context, ctx->pk_init_ctx, ctx->fast_state.reply_key, &ctx->cred.session, pa_pkinit_kx); if (ret) krb5_set_error_message(context, ret, N_("Failed to confirm PA-PKINIT-KX", "")); else if (pa_pkinit_kx != NULL) ctx->ic_flags |= KRB5_INIT_CREDS_PKINIT_KX_VALID; } if (ret == 0) ret = copy_EncKDCRepPart(&rep.enc_part, &ctx->enc_part); krb5_free_keyblock(context, ctx->fast_state.reply_key); ctx->fast_state.reply_key = NULL; *flags = 0; free_AS_REP(&rep.kdc_rep); free_EncASRepPart(&rep.enc_part); return ret; } else { /* let's try to parse it as a KRB-ERROR */ _krb5_debug(context, 5, "krb5_get_init_creds: got an error"); free_KRB_ERROR(&ctx->error); ret = krb5_rd_error(context, in, &ctx->error); if(ret && in->length && ((char*)in->data)[0] == 4) ret = KRB5KRB_AP_ERR_V4_REPLY; if (ret) { _krb5_debug(context, 5, "krb5_get_init_creds: failed to read error"); goto out; } /* * Unwrap KRB-ERROR */ ret = fast_unwrap_error(context, &ctx->fast_state, &ctx->error); if (ret) goto out; /* * */ ret = krb5_error_from_rd_error(context, &ctx->error, &ctx->cred); _krb5_debug(context, 5, "krb5_get_init_creds: KRB-ERROR %d", ret); /* * If no preauth was set and KDC requires it, give it one * more try. 
*/ if (ret == KRB5KDC_ERR_PREAUTH_REQUIRED) { free_METHOD_DATA(&ctx->md); memset(&ctx->md, 0, sizeof(ctx->md)); if (ctx->error.e_data) { ret = decode_METHOD_DATA(ctx->error.e_data->data, ctx->error.e_data->length, &ctx->md, NULL); if (ret) krb5_set_error_message(context, ret, N_("Failed to decode METHOD-DATA", "")); } else { krb5_set_error_message(context, ret, N_("Preauth required but no preauth " "options send by KDC", "")); } } else if (ret == KRB5KRB_AP_ERR_SKEW && context->kdc_sec_offset == 0) { /* * Try adapt to timeskrew when we are using pre-auth, and * if there was a time skew, try again. */ krb5_set_real_time(context, ctx->error.stime, -1); if (context->kdc_sec_offset) ret = 0; _krb5_debug(context, 10, "init_creds: err skew updateing kdc offset to %d", context->kdc_sec_offset); ctx->used_pa_types = 0; } else if (ret == KRB5_KDC_ERR_WRONG_REALM && ctx->flags.canonicalize) { /* client referal to a new realm */ if (ctx->error.crealm == NULL) { krb5_set_error_message(context, ret, N_("Got a client referral, not but no realm", "")); goto out; } _krb5_debug(context, 5, "krb5_get_init_creds: got referal to realm %s", *ctx->error.crealm); ret = krb5_principal_set_realm(context, ctx->cred.client, *ctx->error.crealm); if (ret) goto out; if (krb5_principal_is_krbtgt(context, ctx->cred.server)) { ret = krb5_init_creds_set_service(context, ctx, NULL); if (ret) goto out; } free_AS_REQ(&ctx->as_req); memset(&ctx->as_req, 0, sizeof(ctx->as_req)); ctx->used_pa_types = 0; } else if (ret == KRB5KDC_ERR_KEY_EXP && ctx->runflags.change_password == 0 && ctx->prompter) { char buf2[1024]; ctx->runflags.change_password = 1; ctx->prompter(context, ctx->prompter_data, NULL, N_("Password has expired", ""), 0, NULL); /* try to avoid recursion */ if (ctx->in_tkt_service != NULL && strcmp(ctx->in_tkt_service, "kadmin/changepw") == 0) goto out; /* don't try to change password where then where none */ if (ctx->prompter == NULL) goto out; ret = change_password(context, ctx->cred.client, ctx->password, buf2, sizeof(buf2), ctx->prompter, ctx->prompter_data, NULL); if (ret) goto out; krb5_init_creds_set_password(context, ctx, buf2); ctx->used_pa_types = 0; ret = 0; } else if (ret == KRB5KDC_ERR_PREAUTH_FAILED) { if (ctx->fast_state.flags & KRB5_FAST_DISABLED) goto out; if (ctx->fast_state.flags & (KRB5_FAST_REQUIRED | KRB5_FAST_EXPECTED)) goto out; _krb5_debug(context, 10, "preauth failed with FAST, " "and told by KD or user, trying w/o FAST"); ctx->fast_state.flags |= KRB5_FAST_DISABLED; ctx->used_pa_types = 0; ret = 0; } if (ret) goto out; } } if (ctx->as_req.req_body.cname == NULL) { ret = init_as_req(context, ctx->flags, &ctx->cred, ctx->addrs, ctx->etypes, &ctx->as_req); if (ret) { free_init_creds_ctx(context, ctx); return ret; } } if (ctx->as_req.padata) { free_METHOD_DATA(ctx->as_req.padata); free(ctx->as_req.padata); ctx->as_req.padata = NULL; } /* Set a new nonce. 
*/ ctx->as_req.req_body.nonce = ctx->nonce; /* fill_in_md_data */ ret = process_pa_data_to_md(context, &ctx->cred, &ctx->as_req, ctx, &ctx->md, &ctx->as_req.padata, ctx->prompter, ctx->prompter_data); if (ret) goto out; /* * Wrap with FAST */ copy_AS_REQ(&ctx->as_req, &req2); ret = fast_wrap_req(context, &ctx->fast_state, &req2); if (ret) { free_AS_REQ(&req2); goto out; } krb5_data_free(&ctx->req_buffer); ASN1_MALLOC_ENCODE(AS_REQ, ctx->req_buffer.data, ctx->req_buffer.length, &req2, &len, ret); free_AS_REQ(&req2); if (ret) goto out; if(len != ctx->req_buffer.length) krb5_abortx(context, "internal error in ASN.1 encoder"); out->data = ctx->req_buffer.data; out->length = ctx->req_buffer.length; *flags = KRB5_INIT_CREDS_STEP_FLAG_CONTINUE; return 0; out: return ret; }
169,670
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int main(int argc, char **argv) { FILE *infile = NULL; vpx_codec_ctx_t codec = {0}; vpx_codec_enc_cfg_t cfg = {0}; int frame_count = 0; vpx_image_t raw; vpx_codec_err_t res; VpxVideoInfo info = {0}; VpxVideoWriter *writer = NULL; const VpxInterface *encoder = NULL; int update_frame_num = 0; const int fps = 30; // TODO(dkovalev) add command line argument const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument exec_name = argv[0]; if (argc != 6) die("Invalid number of arguments"); encoder = get_vpx_encoder_by_name("vp8"); if (!encoder) die("Unsupported codec."); update_frame_num = atoi(argv[5]); if (!update_frame_num) die("Couldn't parse frame number '%s'\n", argv[5]); info.codec_fourcc = encoder->fourcc; info.frame_width = strtol(argv[1], NULL, 0); info.frame_height = strtol(argv[2], NULL, 0); info.time_base.numerator = 1; info.time_base.denominator = fps; if (info.frame_width <= 0 || info.frame_height <= 0 || (info.frame_width % 2) != 0 || (info.frame_height % 2) != 0) { die("Invalid frame size: %dx%d", info.frame_width, info.frame_height); } if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width, info.frame_height, 1)) { die("Failed to allocate image."); } printf("Using %s\n", vpx_codec_iface_name(encoder->interface())); res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0); if (res) die_codec(&codec, "Failed to get default codec config."); cfg.g_w = info.frame_width; cfg.g_h = info.frame_height; cfg.g_timebase.num = info.time_base.numerator; cfg.g_timebase.den = info.time_base.denominator; cfg.rc_target_bitrate = bitrate; writer = vpx_video_writer_open(argv[4], kContainerIVF, &info); if (!writer) die("Failed to open %s for writing.", argv[4]); if (!(infile = fopen(argv[3], "rb"))) die("Failed to open %s for reading.", argv[3]); if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0)) die_codec(&codec, "Failed to initialize encoder"); while (vpx_img_read(&raw, infile)) { if (frame_count + 1 == update_frame_num) { vpx_ref_frame_t ref; ref.frame_type = VP8_LAST_FRAME; ref.img = raw; if (vpx_codec_control(&codec, VP8_SET_REFERENCE, &ref)) die_codec(&codec, "Failed to set reference frame"); } encode_frame(&codec, &raw, frame_count++, writer); } encode_frame(&codec, NULL, -1, writer); printf("\n"); fclose(infile); printf("Processed %d frames.\n", frame_count); vpx_img_free(&raw); if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec."); vpx_video_writer_close(writer); return EXIT_SUCCESS; } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
int main(int argc, char **argv) { FILE *infile = NULL; vpx_codec_ctx_t codec = {0}; vpx_codec_enc_cfg_t cfg = {0}; int frame_count = 0; vpx_image_t raw; vpx_codec_err_t res; VpxVideoInfo info = {0}; VpxVideoWriter *writer = NULL; const VpxInterface *encoder = NULL; int update_frame_num = 0; const int fps = 30; // TODO(dkovalev) add command line argument const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument exec_name = argv[0]; if (argc != 6) die("Invalid number of arguments"); encoder = get_vpx_encoder_by_name("vp8"); if (!encoder) die("Unsupported codec."); update_frame_num = atoi(argv[5]); if (!update_frame_num) die("Couldn't parse frame number '%s'\n", argv[5]); info.codec_fourcc = encoder->fourcc; info.frame_width = strtol(argv[1], NULL, 0); info.frame_height = strtol(argv[2], NULL, 0); info.time_base.numerator = 1; info.time_base.denominator = fps; if (info.frame_width <= 0 || info.frame_height <= 0 || (info.frame_width % 2) != 0 || (info.frame_height % 2) != 0) { die("Invalid frame size: %dx%d", info.frame_width, info.frame_height); } if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width, info.frame_height, 1)) { die("Failed to allocate image."); } printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface())); res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0); if (res) die_codec(&codec, "Failed to get default codec config."); cfg.g_w = info.frame_width; cfg.g_h = info.frame_height; cfg.g_timebase.num = info.time_base.numerator; cfg.g_timebase.den = info.time_base.denominator; cfg.rc_target_bitrate = bitrate; writer = vpx_video_writer_open(argv[4], kContainerIVF, &info); if (!writer) die("Failed to open %s for writing.", argv[4]); if (!(infile = fopen(argv[3], "rb"))) die("Failed to open %s for reading.", argv[3]); if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0)) die_codec(&codec, "Failed to initialize encoder"); // Encode frames. while (vpx_img_read(&raw, infile)) { if (frame_count + 1 == update_frame_num) { vpx_ref_frame_t ref; ref.frame_type = VP8_LAST_FRAME; ref.img = raw; if (vpx_codec_control(&codec, VP8_SET_REFERENCE, &ref)) die_codec(&codec, "Failed to set reference frame"); } encode_frame(&codec, &raw, frame_count++, writer); } // Flush encoder. while (encode_frame(&codec, NULL, -1, writer)) {} printf("\n"); fclose(infile); printf("Processed %d frames.\n", frame_count); vpx_img_free(&raw); if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec."); vpx_video_writer_close(writer); return EXIT_SUCCESS; }
174,498
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: string_modifier_check(struct magic_set *ms, struct magic *m) { if ((ms->flags & MAGIC_CHECK) == 0) return 0; if (m->type != FILE_PSTRING && (m->str_flags & PSTRING_LEN) != 0) { file_magwarn(ms, "'/BHhLl' modifiers are only allowed for pascal strings\n"); return -1; } switch (m->type) { case FILE_BESTRING16: case FILE_LESTRING16: if (m->str_flags != 0) { file_magwarn(ms, "no modifiers allowed for 16-bit strings\n"); return -1; } break; case FILE_STRING: case FILE_PSTRING: if ((m->str_flags & REGEX_OFFSET_START) != 0) { file_magwarn(ms, "'/%c' only allowed on regex and search\n", CHAR_REGEX_OFFSET_START); return -1; } break; case FILE_SEARCH: if (m->str_range == 0) { file_magwarn(ms, "missing range; defaulting to %d\n", STRING_DEFAULT_RANGE); m->str_range = STRING_DEFAULT_RANGE; return -1; } break; case FILE_REGEX: if ((m->str_flags & STRING_COMPACT_WHITESPACE) != 0) { file_magwarn(ms, "'/%c' not allowed on regex\n", CHAR_COMPACT_WHITESPACE); return -1; } if ((m->str_flags & STRING_COMPACT_OPTIONAL_WHITESPACE) != 0) { file_magwarn(ms, "'/%c' not allowed on regex\n", CHAR_COMPACT_OPTIONAL_WHITESPACE); return -1; } break; default: file_magwarn(ms, "coding error: m->type=%d\n", m->type); return -1; } return 0; } Commit Message: * Enforce limit of 8K on regex searches that have no limits * Allow the l modifier for regex to mean line count. Default to byte count. If line count is specified, assume a max of 80 characters per line to limit the byte count. * Don't allow conversions to be used for dates, allowing the mask field to be used as an offset. * Bump the version of the magic format so that regex changes are visible. CWE ID: CWE-399
string_modifier_check(struct magic_set *ms, struct magic *m) { if ((ms->flags & MAGIC_CHECK) == 0) return 0; if ((m->type != FILE_REGEX || (m->str_flags & REGEX_LINE_COUNT) == 0) && (m->type != FILE_PSTRING && (m->str_flags & PSTRING_LEN) != 0)) { file_magwarn(ms, "'/BHhLl' modifiers are only allowed for pascal strings\n"); return -1; } switch (m->type) { case FILE_BESTRING16: case FILE_LESTRING16: if (m->str_flags != 0) { file_magwarn(ms, "no modifiers allowed for 16-bit strings\n"); return -1; } break; case FILE_STRING: case FILE_PSTRING: if ((m->str_flags & REGEX_OFFSET_START) != 0) { file_magwarn(ms, "'/%c' only allowed on regex and search\n", CHAR_REGEX_OFFSET_START); return -1; } break; case FILE_SEARCH: if (m->str_range == 0) { file_magwarn(ms, "missing range; defaulting to %d\n", STRING_DEFAULT_RANGE); m->str_range = STRING_DEFAULT_RANGE; return -1; } break; case FILE_REGEX: if ((m->str_flags & STRING_COMPACT_WHITESPACE) != 0) { file_magwarn(ms, "'/%c' not allowed on regex\n", CHAR_COMPACT_WHITESPACE); return -1; } if ((m->str_flags & STRING_COMPACT_OPTIONAL_WHITESPACE) != 0) { file_magwarn(ms, "'/%c' not allowed on regex\n", CHAR_COMPACT_OPTIONAL_WHITESPACE); return -1; } break; default: file_magwarn(ms, "coding error: m->type=%d\n", m->type); return -1; } return 0; }
166,356
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void GetLoadTimes(const v8::FunctionCallbackInfo<v8::Value>& args) { WebLocalFrame* frame = WebLocalFrame::frameForCurrentContext(); if (!frame) { args.GetReturnValue().SetNull(); return; } WebDataSource* data_source = frame->dataSource(); if (!data_source) { args.GetReturnValue().SetNull(); return; } DocumentState* document_state = DocumentState::FromDataSource(data_source); if (!document_state) { args.GetReturnValue().SetNull(); return; } double request_time = document_state->request_time().ToDoubleT(); double start_load_time = document_state->start_load_time().ToDoubleT(); double commit_load_time = document_state->commit_load_time().ToDoubleT(); double finish_document_load_time = document_state->finish_document_load_time().ToDoubleT(); double finish_load_time = document_state->finish_load_time().ToDoubleT(); double first_paint_time = document_state->first_paint_time().ToDoubleT(); double first_paint_after_load_time = document_state->first_paint_after_load_time().ToDoubleT(); std::string navigation_type = GetNavigationType(data_source->navigationType()); bool was_fetched_via_spdy = document_state->was_fetched_via_spdy(); bool was_npn_negotiated = document_state->was_npn_negotiated(); std::string npn_negotiated_protocol = document_state->npn_negotiated_protocol(); bool was_alternate_protocol_available = document_state->was_alternate_protocol_available(); std::string connection_info = net::HttpResponseInfo::ConnectionInfoToString( document_state->connection_info()); v8::Isolate* isolate = args.GetIsolate(); v8::Local<v8::Object> load_times = v8::Object::New(isolate); load_times->Set(v8::String::NewFromUtf8(isolate, "requestTime"), v8::Number::New(isolate, request_time)); load_times->Set(v8::String::NewFromUtf8(isolate, "startLoadTime"), v8::Number::New(isolate, start_load_time)); load_times->Set(v8::String::NewFromUtf8(isolate, "commitLoadTime"), v8::Number::New(isolate, commit_load_time)); load_times->Set(v8::String::NewFromUtf8(isolate, "finishDocumentLoadTime"), v8::Number::New(isolate, finish_document_load_time)); load_times->Set(v8::String::NewFromUtf8(isolate, "finishLoadTime"), v8::Number::New(isolate, finish_load_time)); load_times->Set(v8::String::NewFromUtf8(isolate, "firstPaintTime"), v8::Number::New(isolate, first_paint_time)); load_times->Set(v8::String::NewFromUtf8(isolate, "firstPaintAfterLoadTime"), v8::Number::New(isolate, first_paint_after_load_time)); load_times->Set(v8::String::NewFromUtf8(isolate, "navigationType"), v8::String::NewFromUtf8(isolate, navigation_type.c_str())); load_times->Set(v8::String::NewFromUtf8(isolate, "wasFetchedViaSpdy"), v8::Boolean::New(isolate, was_fetched_via_spdy)); load_times->Set(v8::String::NewFromUtf8(isolate, "wasNpnNegotiated"), v8::Boolean::New(isolate, was_npn_negotiated)); load_times->Set( v8::String::NewFromUtf8(isolate, "npnNegotiatedProtocol"), v8::String::NewFromUtf8(isolate, npn_negotiated_protocol.c_str())); load_times->Set( v8::String::NewFromUtf8(isolate, "wasAlternateProtocolAvailable"), v8::Boolean::New(isolate, was_alternate_protocol_available)); load_times->Set(v8::String::NewFromUtf8(isolate, "connectionInfo"), v8::String::NewFromUtf8(isolate, connection_info.c_str())); args.GetReturnValue().Set(load_times); } Commit Message: Cache csi info before passing it to JS setters. JS setters invalidate the pointers frame, data_source and document_state. BUG=590455 Review URL: https://codereview.chromium.org/1751553002 Cr-Commit-Position: refs/heads/master@{#379047} CWE ID:
static void GetLoadTimes(const v8::FunctionCallbackInfo<v8::Value>& args) { args.GetReturnValue().SetNull(); WebLocalFrame* frame = WebLocalFrame::frameForCurrentContext(); if (!frame) { return; } WebDataSource* data_source = frame->dataSource(); if (!data_source) { return; } DocumentState* document_state = DocumentState::FromDataSource(data_source); if (!document_state) { return; } double request_time = document_state->request_time().ToDoubleT(); double start_load_time = document_state->start_load_time().ToDoubleT(); double commit_load_time = document_state->commit_load_time().ToDoubleT(); double finish_document_load_time = document_state->finish_document_load_time().ToDoubleT(); double finish_load_time = document_state->finish_load_time().ToDoubleT(); double first_paint_time = document_state->first_paint_time().ToDoubleT(); double first_paint_after_load_time = document_state->first_paint_after_load_time().ToDoubleT(); std::string navigation_type = GetNavigationType(data_source->navigationType()); bool was_fetched_via_spdy = document_state->was_fetched_via_spdy(); bool was_npn_negotiated = document_state->was_npn_negotiated(); std::string npn_negotiated_protocol = document_state->npn_negotiated_protocol(); bool was_alternate_protocol_available = document_state->was_alternate_protocol_available(); std::string connection_info = net::HttpResponseInfo::ConnectionInfoToString( document_state->connection_info()); v8::Isolate* isolate = args.GetIsolate(); v8::Local<v8::Context> ctx = isolate->GetCurrentContext(); v8::Local<v8::Object> load_times = v8::Object::New(isolate); if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "requestTime", v8::NewStringType::kNormal) .ToLocalChecked(), v8::Number::New(isolate, request_time)) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "startLoadTime", v8::NewStringType::kNormal) .ToLocalChecked(), v8::Number::New(isolate, start_load_time)) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "commitLoadTime", v8::NewStringType::kNormal) .ToLocalChecked(), v8::Number::New(isolate, commit_load_time)) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "finishDocumentLoadTime", v8::NewStringType::kNormal) .ToLocalChecked(), v8::Number::New(isolate, finish_document_load_time)) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "finishLoadTime", v8::NewStringType::kNormal) .ToLocalChecked(), v8::Number::New(isolate, finish_load_time)) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "firstPaintTime", v8::NewStringType::kNormal) .ToLocalChecked(), v8::Number::New(isolate, first_paint_time)) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "firstPaintAfterLoadTime", v8::NewStringType::kNormal) .ToLocalChecked(), v8::Number::New(isolate, first_paint_after_load_time)) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "navigationType", v8::NewStringType::kNormal) .ToLocalChecked(), v8::String::NewFromUtf8(isolate, navigation_type.c_str(), v8::NewStringType::kNormal) .ToLocalChecked()) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "wasFetchedViaSpdy", v8::NewStringType::kNormal) .ToLocalChecked(), v8::Boolean::New(isolate, was_fetched_via_spdy)) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, 
"wasNpnNegotiated", v8::NewStringType::kNormal) .ToLocalChecked(), v8::Boolean::New(isolate, was_npn_negotiated)) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "npnNegotiatedProtocol", v8::NewStringType::kNormal) .ToLocalChecked(), v8::String::NewFromUtf8(isolate, npn_negotiated_protocol.c_str(), v8::NewStringType::kNormal) .ToLocalChecked()) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "wasAlternateProtocolAvailable", v8::NewStringType::kNormal) .ToLocalChecked(), v8::Boolean::New(isolate, was_alternate_protocol_available)) .FromMaybe(false)) { return; } if (!load_times ->Set(ctx, v8::String::NewFromUtf8(isolate, "connectionInfo", v8::NewStringType::kNormal) .ToLocalChecked(), v8::String::NewFromUtf8(isolate, connection_info.c_str(), v8::NewStringType::kNormal) .ToLocalChecked()) .FromMaybe(false)) { return; } args.GetReturnValue().Set(load_times); }
172,118
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { int result; handle_t *handle = NULL; struct super_block *sb = file_inode(vma->vm_file)->i_sb; bool write = vmf->flags & FAULT_FLAG_WRITE; if (write) { sb_start_pagefault(sb); file_update_time(vma->vm_file); handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, EXT4_DATA_TRANS_BLOCKS(sb)); } if (IS_ERR(handle)) result = VM_FAULT_SIGBUS; else result = __dax_fault(vma, vmf, ext4_get_block_dax, ext4_end_io_unwritten); if (write) { if (!IS_ERR(handle)) ext4_journal_stop(handle); sb_end_pagefault(sb); } return result; } Commit Message: ext4: fix races between page faults and hole punching Currently, page faults and hole punching are completely unsynchronized. This can result in page fault faulting in a page into a range that we are punching after truncate_pagecache_range() has been called and thus we can end up with a page mapped to disk blocks that will be shortly freed. Filesystem corruption will shortly follow. Note that the same race is avoided for truncate by checking page fault offset against i_size but there isn't similar mechanism available for punching holes. Fix the problem by creating new rw semaphore i_mmap_sem in inode and grab it for writing over truncate, hole punching, and other functions removing blocks from extent tree and for read over page faults. We cannot easily use i_data_sem for this since that ranks below transaction start and we need something ranking above it so that it can be held over the whole truncate / hole punching operation. Also remove various workarounds we had in the code to reduce race window when page fault could have created pages with stale mapping information. Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]> CWE ID: CWE-362
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { int result; handle_t *handle = NULL; struct inode *inode = file_inode(vma->vm_file); struct super_block *sb = inode->i_sb; bool write = vmf->flags & FAULT_FLAG_WRITE; if (write) { sb_start_pagefault(sb); file_update_time(vma->vm_file); down_read(&EXT4_I(inode)->i_mmap_sem); handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, EXT4_DATA_TRANS_BLOCKS(sb)); } else down_read(&EXT4_I(inode)->i_mmap_sem); if (IS_ERR(handle)) result = VM_FAULT_SIGBUS; else result = __dax_fault(vma, vmf, ext4_get_block_dax, ext4_end_io_unwritten); if (write) { if (!IS_ERR(handle)) ext4_journal_stop(handle); up_read(&EXT4_I(inode)->i_mmap_sem); sb_end_pagefault(sb); } else up_read(&EXT4_I(inode)->i_mmap_sem); return result; }
167,486
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: vrrp_print_stats(void) { FILE *file; file = fopen (stats_file, "w"); if (!file) { log_message(LOG_INFO, "Can't open %s (%d: %s)", stats_file, errno, strerror(errno)); return; } list l = vrrp_data->vrrp; element e; vrrp_t *vrrp; for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) { vrrp = ELEMENT_DATA(e); fprintf(file, "VRRP Instance: %s\n", vrrp->iname); fprintf(file, " Advertisements:\n"); fprintf(file, " Received: %" PRIu64 "\n", vrrp->stats->advert_rcvd); fprintf(file, " Sent: %d\n", vrrp->stats->advert_sent); fprintf(file, " Became master: %d\n", vrrp->stats->become_master); fprintf(file, " Released master: %d\n", vrrp->stats->release_master); fprintf(file, " Packet Errors:\n"); fprintf(file, " Length: %" PRIu64 "\n", vrrp->stats->packet_len_err); fprintf(file, " TTL: %" PRIu64 "\n", vrrp->stats->ip_ttl_err); fprintf(file, " Invalid Type: %" PRIu64 "\n", vrrp->stats->invalid_type_rcvd); fprintf(file, " Advertisement Interval: %" PRIu64 "\n", vrrp->stats->advert_interval_err); fprintf(file, " Address List: %" PRIu64 "\n", vrrp->stats->addr_list_err); fprintf(file, " Authentication Errors:\n"); fprintf(file, " Invalid Type: %d\n", vrrp->stats->invalid_authtype); #ifdef _WITH_VRRP_AUTH_ fprintf(file, " Type Mismatch: %d\n", vrrp->stats->authtype_mismatch); fprintf(file, " Failure: %d\n", vrrp->stats->auth_failure); #endif fprintf(file, " Priority Zero:\n"); fprintf(file, " Received: %" PRIu64 "\n", vrrp->stats->pri_zero_rcvd); fprintf(file, " Sent: %" PRIu64 "\n", vrrp->stats->pri_zero_sent); } fclose(file); } Commit Message: When opening files for write, ensure they aren't symbolic links Issue #1048 identified that if, for example, a non privileged user created a symbolic link from /etc/keepalvied.data to /etc/passwd, writing to /etc/keepalived.data (which could be invoked via DBus) would cause /etc/passwd to be overwritten. This commit stops keepalived writing to pathnames where the ultimate component is a symbolic link, by setting O_NOFOLLOW whenever opening a file for writing. This might break some setups, where, for example, /etc/keepalived.data was a symbolic link to /home/fred/keepalived.data. If this was the case, instead create a symbolic link from /home/fred/keepalived.data to /tmp/keepalived.data, so that the file is still accessible via /home/fred/keepalived.data. There doesn't appear to be a way around this backward incompatibility, since even checking if the pathname is a symbolic link prior to opening for writing would create a race condition. Signed-off-by: Quentin Armitage <[email protected]> CWE ID: CWE-59
vrrp_print_stats(void) { FILE *file = fopen_safe(stats_file, "w"); element e; vrrp_t *vrrp; if (!file) { log_message(LOG_INFO, "Can't open %s (%d: %s)", stats_file, errno, strerror(errno)); return; } LIST_FOREACH(vrrp_data->vrrp, vrrp, e) { fprintf(file, "VRRP Instance: %s\n", vrrp->iname); fprintf(file, " Advertisements:\n"); fprintf(file, " Received: %" PRIu64 "\n", vrrp->stats->advert_rcvd); fprintf(file, " Sent: %d\n", vrrp->stats->advert_sent); fprintf(file, " Became master: %d\n", vrrp->stats->become_master); fprintf(file, " Released master: %d\n", vrrp->stats->release_master); fprintf(file, " Packet Errors:\n"); fprintf(file, " Length: %" PRIu64 "\n", vrrp->stats->packet_len_err); fprintf(file, " TTL: %" PRIu64 "\n", vrrp->stats->ip_ttl_err); fprintf(file, " Invalid Type: %" PRIu64 "\n", vrrp->stats->invalid_type_rcvd); fprintf(file, " Advertisement Interval: %" PRIu64 "\n", vrrp->stats->advert_interval_err); fprintf(file, " Address List: %" PRIu64 "\n", vrrp->stats->addr_list_err); fprintf(file, " Authentication Errors:\n"); fprintf(file, " Invalid Type: %d\n", vrrp->stats->invalid_authtype); #ifdef _WITH_VRRP_AUTH_ fprintf(file, " Type Mismatch: %d\n", vrrp->stats->authtype_mismatch); fprintf(file, " Failure: %d\n", vrrp->stats->auth_failure); #endif fprintf(file, " Priority Zero:\n"); fprintf(file, " Received: %" PRIu64 "\n", vrrp->stats->pri_zero_rcvd); fprintf(file, " Sent: %" PRIu64 "\n", vrrp->stats->pri_zero_sent); } fclose(file); }
168,992
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void PushMessagingServiceImpl::DidHandleMessage( const std::string& app_id, const base::Closure& message_handled_closure) { auto in_flight_iterator = in_flight_message_deliveries_.find(app_id); DCHECK(in_flight_iterator != in_flight_message_deliveries_.end()); in_flight_message_deliveries_.erase(in_flight_iterator); #if BUILDFLAG(ENABLE_BACKGROUND) if (in_flight_message_deliveries_.empty()) in_flight_keep_alive_.reset(); #endif message_handled_closure.Run(); if (push_messaging_service_observer_) push_messaging_service_observer_->OnMessageHandled(); } Commit Message: Remove some senseless indirection from the Push API code Four files to call one Java function. Let's just call it directly. BUG= Change-Id: I6e988e9a000051dd7e3dd2b517a33a09afc2fff6 Reviewed-on: https://chromium-review.googlesource.com/749147 Reviewed-by: Anita Woodruff <[email protected]> Commit-Queue: Peter Beverloo <[email protected]> Cr-Commit-Position: refs/heads/master@{#513464} CWE ID: CWE-119
void PushMessagingServiceImpl::DidHandleMessage( const std::string& app_id, const base::Closure& message_handled_closure) { auto in_flight_iterator = in_flight_message_deliveries_.find(app_id); DCHECK(in_flight_iterator != in_flight_message_deliveries_.end()); in_flight_message_deliveries_.erase(in_flight_iterator); #if BUILDFLAG(ENABLE_BACKGROUND) if (in_flight_message_deliveries_.empty()) in_flight_keep_alive_.reset(); #endif message_handled_closure.Run(); #if defined(OS_ANDROID) chrome::android::Java_PushMessagingServiceObserver_onMessageHandled( base::android::AttachCurrentThread()); #endif }
172,941
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: lha_read_file_header_1(struct archive_read *a, struct lha *lha) { const unsigned char *p; size_t extdsize; int i, err, err2; int namelen, padding; unsigned char headersum, sum_calculated; err = ARCHIVE_OK; if ((p = __archive_read_ahead(a, H1_FIXED_SIZE, NULL)) == NULL) return (truncated_error(a)); lha->header_size = p[H1_HEADER_SIZE_OFFSET] + 2; headersum = p[H1_HEADER_SUM_OFFSET]; /* Note: An extended header size is included in a compsize. */ lha->compsize = archive_le32dec(p + H1_COMP_SIZE_OFFSET); lha->origsize = archive_le32dec(p + H1_ORIG_SIZE_OFFSET); lha->mtime = lha_dos_time(p + H1_DOS_TIME_OFFSET); namelen = p[H1_NAME_LEN_OFFSET]; /* Calculate a padding size. The result will be normally 0 only(?) */ padding = ((int)lha->header_size) - H1_FIXED_SIZE - namelen; if (namelen > 230 || padding < 0) goto invalid; if ((p = __archive_read_ahead(a, lha->header_size, NULL)) == NULL) return (truncated_error(a)); for (i = 0; i < namelen; i++) { if (p[i + H1_FILE_NAME_OFFSET] == 0xff) goto invalid;/* Invalid filename. */ } archive_strncpy(&lha->filename, p + H1_FILE_NAME_OFFSET, namelen); lha->crc = archive_le16dec(p + H1_FILE_NAME_OFFSET + namelen); lha->setflag |= CRC_IS_SET; sum_calculated = lha_calcsum(0, p, 2, lha->header_size - 2); /* Consume used bytes but not include `next header size' data * since it will be consumed in lha_read_file_extended_header(). */ __archive_read_consume(a, lha->header_size - 2); /* Read extended headers */ err2 = lha_read_file_extended_header(a, lha, NULL, 2, (size_t)(lha->compsize + 2), &extdsize); if (err2 < ARCHIVE_WARN) return (err2); if (err2 < err) err = err2; /* Get a real compressed file size. */ lha->compsize -= extdsize - 2; if (sum_calculated != headersum) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "LHa header sum error"); return (ARCHIVE_FATAL); } return (err); invalid: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid LHa header"); return (ARCHIVE_FATAL); } Commit Message: Fail with negative lha->compsize in lha_read_file_header_1() Fixes a heap buffer overflow reported in Secunia SA74169 CWE ID: CWE-125
lha_read_file_header_1(struct archive_read *a, struct lha *lha) { const unsigned char *p; size_t extdsize; int i, err, err2; int namelen, padding; unsigned char headersum, sum_calculated; err = ARCHIVE_OK; if ((p = __archive_read_ahead(a, H1_FIXED_SIZE, NULL)) == NULL) return (truncated_error(a)); lha->header_size = p[H1_HEADER_SIZE_OFFSET] + 2; headersum = p[H1_HEADER_SUM_OFFSET]; /* Note: An extended header size is included in a compsize. */ lha->compsize = archive_le32dec(p + H1_COMP_SIZE_OFFSET); lha->origsize = archive_le32dec(p + H1_ORIG_SIZE_OFFSET); lha->mtime = lha_dos_time(p + H1_DOS_TIME_OFFSET); namelen = p[H1_NAME_LEN_OFFSET]; /* Calculate a padding size. The result will be normally 0 only(?) */ padding = ((int)lha->header_size) - H1_FIXED_SIZE - namelen; if (namelen > 230 || padding < 0) goto invalid; if ((p = __archive_read_ahead(a, lha->header_size, NULL)) == NULL) return (truncated_error(a)); for (i = 0; i < namelen; i++) { if (p[i + H1_FILE_NAME_OFFSET] == 0xff) goto invalid;/* Invalid filename. */ } archive_strncpy(&lha->filename, p + H1_FILE_NAME_OFFSET, namelen); lha->crc = archive_le16dec(p + H1_FILE_NAME_OFFSET + namelen); lha->setflag |= CRC_IS_SET; sum_calculated = lha_calcsum(0, p, 2, lha->header_size - 2); /* Consume used bytes but not include `next header size' data * since it will be consumed in lha_read_file_extended_header(). */ __archive_read_consume(a, lha->header_size - 2); /* Read extended headers */ err2 = lha_read_file_extended_header(a, lha, NULL, 2, (size_t)(lha->compsize + 2), &extdsize); if (err2 < ARCHIVE_WARN) return (err2); if (err2 < err) err = err2; /* Get a real compressed file size. */ lha->compsize -= extdsize - 2; if (lha->compsize < 0) goto invalid; /* Invalid compressed file size */ if (sum_calculated != headersum) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "LHa header sum error"); return (ARCHIVE_FATAL); } return (err); invalid: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid LHa header"); return (ARCHIVE_FATAL); }
168,381
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: SelectionInDOMTree ConvertToSelectionInDOMTree( const SelectionInFlatTree& selection_in_flat_tree) { return SelectionInDOMTree::Builder() .SetAffinity(selection_in_flat_tree.Affinity()) .SetBaseAndExtent(ToPositionInDOMTree(selection_in_flat_tree.Base()), ToPositionInDOMTree(selection_in_flat_tree.Extent())) .SetIsDirectional(selection_in_flat_tree.IsDirectional()) .SetIsHandleVisible(selection_in_flat_tree.IsHandleVisible()) .Build(); } Commit Message: Move SelectionTemplate::is_handle_visible_ to FrameSelection This patch moves |is_handle_visible_| to |FrameSelection| from |SelectionTemplate| since handle visibility is used only for setting |FrameSelection|, hence it is a redundant member variable of |SelectionTemplate|. Bug: 742093 Change-Id: I3add4da3844fb40be34dcb4d4b46b5fa6fed1d7e Reviewed-on: https://chromium-review.googlesource.com/595389 Commit-Queue: Yoshifumi Inoue <[email protected]> Reviewed-by: Xiaocheng Hu <[email protected]> Reviewed-by: Kent Tamura <[email protected]> Cr-Commit-Position: refs/heads/master@{#491660} CWE ID: CWE-119
SelectionInDOMTree ConvertToSelectionInDOMTree( const SelectionInFlatTree& selection_in_flat_tree) { return SelectionInDOMTree::Builder() .SetAffinity(selection_in_flat_tree.Affinity()) .SetBaseAndExtent(ToPositionInDOMTree(selection_in_flat_tree.Base()), ToPositionInDOMTree(selection_in_flat_tree.Extent())) .SetIsDirectional(selection_in_flat_tree.IsDirectional()) .Build(); }
171,762
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void GM2TabStyle::PaintTab(gfx::Canvas* canvas, const SkPath& clip) const { int active_tab_fill_id = 0; int active_tab_y_inset = 0; if (tab_->GetThemeProvider()->HasCustomImage(IDR_THEME_TOOLBAR)) { active_tab_fill_id = IDR_THEME_TOOLBAR; active_tab_y_inset = GetStrokeThickness(true); } if (tab_->IsActive()) { PaintTabBackground(canvas, true /* active */, active_tab_fill_id, active_tab_y_inset, nullptr /* clip */); } else { PaintInactiveTabBackground(canvas, clip); const float throb_value = GetThrobValue(); if (throb_value > 0) { canvas->SaveLayerAlpha(gfx::ToRoundedInt(throb_value * 0xff), tab_->GetLocalBounds()); PaintTabBackground(canvas, true /* active */, active_tab_fill_id, active_tab_y_inset, nullptr /* clip */); canvas->Restore(); } } } Commit Message: Paint tab groups with the group color. * The background of TabGroupHeader now uses the group color. * The backgrounds of tabs in the group are tinted with the group color. This treatment, along with the colors chosen, are intended to be a placeholder. Bug: 905491 Change-Id: Ic808548f8eba23064606e7fb8c9bba281d0d117f Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1610504 Commit-Queue: Bret Sepulveda <[email protected]> Reviewed-by: Taylor Bergquist <[email protected]> Cr-Commit-Position: refs/heads/master@{#660498} CWE ID: CWE-20
void GM2TabStyle::PaintTab(gfx::Canvas* canvas, const SkPath& clip) const { int active_tab_fill_id = 0; int active_tab_y_inset = 0; if (tab_->GetThemeProvider()->HasCustomImage(IDR_THEME_TOOLBAR)) { active_tab_fill_id = IDR_THEME_TOOLBAR; active_tab_y_inset = GetStrokeThickness(true); } if (tab_->IsActive()) { PaintTabBackground(canvas, TAB_ACTIVE, active_tab_fill_id, active_tab_y_inset, nullptr /* clip */); } else { PaintInactiveTabBackground(canvas, clip); const float throb_value = GetThrobValue(); if (throb_value > 0) { canvas->SaveLayerAlpha(gfx::ToRoundedInt(throb_value * 0xff), tab_->GetLocalBounds()); PaintTabBackground(canvas, TAB_ACTIVE, active_tab_fill_id, active_tab_y_inset, nullptr /* clip */); canvas->Restore(); } } }
172,524
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: IDNSpoofChecker::IDNSpoofChecker() { UErrorCode status = U_ZERO_ERROR; checker_ = uspoof_open(&status); if (U_FAILURE(status)) { checker_ = nullptr; return; } uspoof_setRestrictionLevel(checker_, USPOOF_HIGHLY_RESTRICTIVE); SetAllowedUnicodeSet(&status); int32_t checks = uspoof_getChecks(checker_, &status) | USPOOF_AUX_INFO; uspoof_setChecks(checker_, checks, &status); deviation_characters_ = icu::UnicodeSet( UNICODE_STRING_SIMPLE("[\\u00df\\u03c2\\u200c\\u200d]"), status); deviation_characters_.freeze(); non_ascii_latin_letters_ = icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Latin:] - [a-zA-Z]]"), status); non_ascii_latin_letters_.freeze(); kana_letters_exceptions_ = icu::UnicodeSet( UNICODE_STRING_SIMPLE("[\\u3078-\\u307a\\u30d8-\\u30da\\u30fb-\\u30fe]"), status); kana_letters_exceptions_.freeze(); combining_diacritics_exceptions_ = icu::UnicodeSet(UNICODE_STRING_SIMPLE("[\\u0300-\\u0339]"), status); combining_diacritics_exceptions_.freeze(); cyrillic_letters_latin_alike_ = icu::UnicodeSet( icu::UnicodeString::fromUTF8("[асԁеһіјӏорԛѕԝхуъЬҽпгѵѡ]"), status); cyrillic_letters_latin_alike_.freeze(); cyrillic_letters_ = icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Cyrl:]]"), status); cyrillic_letters_.freeze(); DCHECK(U_SUCCESS(status)); lgc_letters_n_ascii_ = icu::UnicodeSet( UNICODE_STRING_SIMPLE("[[:Latin:][:Greek:][:Cyrillic:][0-9\\u002e_" "\\u002d][\\u0300-\\u0339]]"), status); lgc_letters_n_ascii_.freeze(); UParseError parse_error; diacritic_remover_.reset(icu::Transliterator::createFromRules( UNICODE_STRING_SIMPLE("DropAcc"), icu::UnicodeString::fromUTF8("::NFD; ::[:Nonspacing Mark:] Remove; ::NFC;" " ł > l; ø > o; đ > d;"), UTRANS_FORWARD, parse_error, status)); UNICODE_STRING_SIMPLE("ExtraConf"), icu::UnicodeString::fromUTF8("[þϼҏ] > p; [ħнћңҥӈԧԩ] > h;" "[ĸκкқҝҟҡӄԟ] > k; [ŧтҭ] > t;" "[ƅьҍв] > b; [ωшщ] > w; [мӎ] > m;" "п > n; ћ > h; ґ > r; ғ > f; ҫ > c;" "ұ > y; [χҳӽӿ] > x; [ҽҿ] > e;" #if defined(OS_WIN) "ӏ > i;" #else "ӏ > l;" #endif "ԃ > d; ԍ > g; ട > s"), UTRANS_FORWARD, parse_error, status)); DCHECK(U_SUCCESS(status)) << "Spoofchecker initalization failed due to an error: " << u_errorName(status); } Commit Message: Map U+0454 (є) to 'e' (small E) Bug: 803571 Test: components_unittests --gtest_filter=*IDN* Change-Id: I8cc473d0e74208076a2aa17c1869d14bbfaa20ed Reviewed-on: https://chromium-review.googlesource.com/882006 Commit-Queue: Jungshik Shin <[email protected]> Reviewed-by: Peter Kasting <[email protected]> Cr-Commit-Position: refs/heads/master@{#531739} CWE ID:
IDNSpoofChecker::IDNSpoofChecker() { UErrorCode status = U_ZERO_ERROR; checker_ = uspoof_open(&status); if (U_FAILURE(status)) { checker_ = nullptr; return; } uspoof_setRestrictionLevel(checker_, USPOOF_HIGHLY_RESTRICTIVE); SetAllowedUnicodeSet(&status); int32_t checks = uspoof_getChecks(checker_, &status) | USPOOF_AUX_INFO; uspoof_setChecks(checker_, checks, &status); deviation_characters_ = icu::UnicodeSet( UNICODE_STRING_SIMPLE("[\\u00df\\u03c2\\u200c\\u200d]"), status); deviation_characters_.freeze(); non_ascii_latin_letters_ = icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Latin:] - [a-zA-Z]]"), status); non_ascii_latin_letters_.freeze(); kana_letters_exceptions_ = icu::UnicodeSet( UNICODE_STRING_SIMPLE("[\\u3078-\\u307a\\u30d8-\\u30da\\u30fb-\\u30fe]"), status); kana_letters_exceptions_.freeze(); combining_diacritics_exceptions_ = icu::UnicodeSet(UNICODE_STRING_SIMPLE("[\\u0300-\\u0339]"), status); combining_diacritics_exceptions_.freeze(); cyrillic_letters_latin_alike_ = icu::UnicodeSet( icu::UnicodeString::fromUTF8("[асԁеһіјӏорԛѕԝхуъЬҽпгѵѡ]"), status); cyrillic_letters_latin_alike_.freeze(); cyrillic_letters_ = icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Cyrl:]]"), status); cyrillic_letters_.freeze(); DCHECK(U_SUCCESS(status)); lgc_letters_n_ascii_ = icu::UnicodeSet( UNICODE_STRING_SIMPLE("[[:Latin:][:Greek:][:Cyrillic:][0-9\\u002e_" "\\u002d][\\u0300-\\u0339]]"), status); lgc_letters_n_ascii_.freeze(); UParseError parse_error; diacritic_remover_.reset(icu::Transliterator::createFromRules( UNICODE_STRING_SIMPLE("DropAcc"), icu::UnicodeString::fromUTF8("::NFD; ::[:Nonspacing Mark:] Remove; ::NFC;" " ł > l; ø > o; đ > d;"), UTRANS_FORWARD, parse_error, status)); // - U+043F (п) => n // - {U+0454 (є), U+04BD (ҽ), U+04BF (ҿ)} => e UNICODE_STRING_SIMPLE("ExtraConf"), icu::UnicodeString::fromUTF8("[þϼҏ] > p; [ħнћңҥӈԧԩ] > h;" "[ĸκкқҝҟҡӄԟ] > k; [ŧтҭ] > t;" "[ƅьҍв] > b; [ωшщ] > w; [мӎ] > m;" "п > n; [єҽҿ] > e; ґ > r; ғ > f; ҫ > c;" "ұ > y; [χҳӽӿ] > x;" #if defined(OS_WIN) "ӏ > i;" #else "ӏ > l;" #endif "ԃ > d; ԍ > g; ട > s"), UTRANS_FORWARD, parse_error, status)); DCHECK(U_SUCCESS(status)) << "Spoofchecker initalization failed due to an error: " << u_errorName(status); }
172,737
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int get_bitmap_file(struct mddev *mddev, void __user * arg) { mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ char *ptr; int err; file = kmalloc(sizeof(*file), GFP_NOIO); if (!file) return -ENOMEM; err = 0; spin_lock(&mddev->lock); /* bitmap disabled, zero the first byte and copy out */ if (!mddev->bitmap_info.file) file->pathname[0] = '\0'; else if ((ptr = file_path(mddev->bitmap_info.file, file->pathname, sizeof(file->pathname))), IS_ERR(ptr)) err = PTR_ERR(ptr); else memmove(file->pathname, ptr, sizeof(file->pathname)-(ptr-file->pathname)); spin_unlock(&mddev->lock); if (err == 0 && copy_to_user(arg, file, sizeof(*file))) err = -EFAULT; kfree(file); return err; } Commit Message: md: use kzalloc() when bitmap is disabled In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a mdu_bitmap_file_t called "file". 5769 file = kmalloc(sizeof(*file), GFP_NOIO); 5770 if (!file) 5771 return -ENOMEM; This structure is copied to user space at the end of the function. 5786 if (err == 0 && 5787 copy_to_user(arg, file, sizeof(*file))) 5788 err = -EFAULT But if bitmap is disabled only the first byte of "file" is initialized with zero, so it's possible to read some bytes (up to 4095) of kernel space memory from user space. This is an information leak. 5775 /* bitmap disabled, zero the first byte and copy out */ 5776 if (!mddev->bitmap_info.file) 5777 file->pathname[0] = '\0'; Signed-off-by: Benjamin Randazzo <[email protected]> Signed-off-by: NeilBrown <[email protected]> CWE ID: CWE-200
static int get_bitmap_file(struct mddev *mddev, void __user * arg) { mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ char *ptr; int err; file = kzalloc(sizeof(*file), GFP_NOIO); if (!file) return -ENOMEM; err = 0; spin_lock(&mddev->lock); /* bitmap disabled, zero the first byte and copy out */ if (!mddev->bitmap_info.file) file->pathname[0] = '\0'; else if ((ptr = file_path(mddev->bitmap_info.file, file->pathname, sizeof(file->pathname))), IS_ERR(ptr)) err = PTR_ERR(ptr); else memmove(file->pathname, ptr, sizeof(file->pathname)-(ptr-file->pathname)); spin_unlock(&mddev->lock); if (err == 0 && copy_to_user(arg, file, sizeof(*file))) err = -EFAULT; kfree(file); return err; }
166,595
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: Segment::~Segment() { const long count = m_clusterCount + m_clusterPreloadCount; Cluster** i = m_clusters; Cluster** j = m_clusters + count; while (i != j) { Cluster* const p = *i++; assert(p); delete p; } delete[] m_clusters; delete m_pTracks; delete m_pInfo; delete m_pCues; delete m_pChapters; delete m_pSeekHead; } Commit Message: external/libvpx/libwebm: Update snapshot Update libwebm snapshot. This update contains security fixes from upstream. Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b BUG=23167726 Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207 (cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a) CWE ID: CWE-20
Segment::~Segment() { const long count = m_clusterCount + m_clusterPreloadCount; Cluster** i = m_clusters; Cluster** j = m_clusters + count; while (i != j) { Cluster* const p = *i++; delete p; } delete[] m_clusters; delete m_pTracks; delete m_pInfo; delete m_pCues; delete m_pChapters; delete m_pTags; delete m_pSeekHead; }
173,870
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: status_t BnHDCP::onTransact( uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) { switch (code) { case HDCP_SET_OBSERVER: { CHECK_INTERFACE(IHDCP, data, reply); sp<IHDCPObserver> observer = interface_cast<IHDCPObserver>(data.readStrongBinder()); reply->writeInt32(setObserver(observer)); return OK; } case HDCP_INIT_ASYNC: { CHECK_INTERFACE(IHDCP, data, reply); const char *host = data.readCString(); unsigned port = data.readInt32(); reply->writeInt32(initAsync(host, port)); return OK; } case HDCP_SHUTDOWN_ASYNC: { CHECK_INTERFACE(IHDCP, data, reply); reply->writeInt32(shutdownAsync()); return OK; } case HDCP_GET_CAPS: { CHECK_INTERFACE(IHDCP, data, reply); reply->writeInt32(getCaps()); return OK; } case HDCP_ENCRYPT: { size_t size = data.readInt32(); size_t bufSize = 2 * size; if (bufSize > size) { inData = malloc(bufSize); } if (inData == NULL) { reply->writeInt32(ERROR_OUT_OF_RANGE); return OK; } void *outData = (uint8_t *)inData + size; data.read(inData, size); uint32_t streamCTR = data.readInt32(); uint64_t inputCTR; status_t err = encrypt(inData, size, streamCTR, &inputCTR, outData); reply->writeInt32(err); if (err == OK) { reply->writeInt64(inputCTR); reply->write(outData, size); } free(inData); inData = outData = NULL; return OK; } case HDCP_ENCRYPT_NATIVE: { CHECK_INTERFACE(IHDCP, data, reply); sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(); data.read(*graphicBuffer); size_t offset = data.readInt32(); size_t size = data.readInt32(); uint32_t streamCTR = data.readInt32(); void *outData = malloc(size); uint64_t inputCTR; status_t err = encryptNative(graphicBuffer, offset, size, streamCTR, &inputCTR, outData); reply->writeInt32(err); if (err == OK) { reply->writeInt64(inputCTR); reply->write(outData, size); } free(outData); outData = NULL; return OK; } case HDCP_DECRYPT: { size_t size = data.readInt32(); size_t bufSize = 2 * size; void *inData = NULL; if (bufSize > size) { inData = malloc(bufSize); } if (inData == NULL) { reply->writeInt32(ERROR_OUT_OF_RANGE); return OK; } void *outData = (uint8_t *)inData + size; data.read(inData, size); uint32_t streamCTR = data.readInt32(); uint64_t inputCTR = data.readInt64(); status_t err = decrypt(inData, size, streamCTR, inputCTR, outData); reply->writeInt32(err); if (err == OK) { reply->write(outData, size); } free(inData); inData = outData = NULL; return OK; } default: return BBinder::onTransact(code, data, reply, flags); } } Commit Message: Fix overflow check and check read result Bug: 33861560 Test: build Change-Id: Ia85519766e19a6e37237166f309750b3e8323c4e CWE ID: CWE-200
status_t BnHDCP::onTransact( uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) { switch (code) { case HDCP_SET_OBSERVER: { CHECK_INTERFACE(IHDCP, data, reply); sp<IHDCPObserver> observer = interface_cast<IHDCPObserver>(data.readStrongBinder()); reply->writeInt32(setObserver(observer)); return OK; } case HDCP_INIT_ASYNC: { CHECK_INTERFACE(IHDCP, data, reply); const char *host = data.readCString(); unsigned port = data.readInt32(); reply->writeInt32(initAsync(host, port)); return OK; } case HDCP_SHUTDOWN_ASYNC: { CHECK_INTERFACE(IHDCP, data, reply); reply->writeInt32(shutdownAsync()); return OK; } case HDCP_GET_CAPS: { CHECK_INTERFACE(IHDCP, data, reply); reply->writeInt32(getCaps()); return OK; } case HDCP_ENCRYPT: { size_t size = data.readInt32(); // watch out for overflow if (size <= SIZE_MAX / 2) { inData = malloc(2 * size); } if (inData == NULL) { reply->writeInt32(ERROR_OUT_OF_RANGE); return OK; } void *outData = (uint8_t *)inData + size; status_t err = data.read(inData, size); if (err != OK) { free(inData); reply->writeInt32(err); return OK; } uint32_t streamCTR = data.readInt32(); uint64_t inputCTR; err = encrypt(inData, size, streamCTR, &inputCTR, outData); reply->writeInt32(err); if (err == OK) { reply->writeInt64(inputCTR); reply->write(outData, size); } free(inData); inData = outData = NULL; return OK; } case HDCP_ENCRYPT_NATIVE: { CHECK_INTERFACE(IHDCP, data, reply); sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(); data.read(*graphicBuffer); size_t offset = data.readInt32(); size_t size = data.readInt32(); uint32_t streamCTR = data.readInt32(); void *outData = malloc(size); uint64_t inputCTR; status_t err = encryptNative(graphicBuffer, offset, size, streamCTR, &inputCTR, outData); reply->writeInt32(err); if (err == OK) { reply->writeInt64(inputCTR); reply->write(outData, size); } free(outData); outData = NULL; return OK; } case HDCP_DECRYPT: { size_t size = data.readInt32(); size_t bufSize = 2 * size; void *inData = NULL; if (bufSize > size) { inData = malloc(bufSize); } if (inData == NULL) { reply->writeInt32(ERROR_OUT_OF_RANGE); return OK; } void *outData = (uint8_t *)inData + size; data.read(inData, size); uint32_t streamCTR = data.readInt32(); uint64_t inputCTR = data.readInt64(); status_t err = decrypt(inData, size, streamCTR, inputCTR, outData); reply->writeInt32(err); if (err == OK) { reply->write(outData, size); } free(inData); inData = outData = NULL; return OK; } default: return BBinder::onTransact(code, data, reply, flags); } }
174,047
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void HostCache::Set(const Key& key, const Entry& entry, base::TimeTicks now, base::TimeDelta ttl) { TRACE_EVENT0(kNetTracingCategory, "HostCache::Set"); DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); if (caching_is_disabled()) return; auto it = entries_.find(key); if (it != entries_.end()) { bool is_stale = it->second.IsStale(now, network_changes_); RecordSet(is_stale ? SET_UPDATE_STALE : SET_UPDATE_VALID, now, &it->second, entry); entries_.erase(it); } else { if (size() == max_entries_) EvictOneEntry(now); RecordSet(SET_INSERT, now, nullptr, entry); } AddEntry(Key(key), Entry(entry, now, ttl, network_changes_)); } Commit Message: Add PersistenceDelegate to HostCache PersistenceDelegate is a new interface for persisting the contents of the HostCache. This commit includes the interface itself, the logic in HostCache for interacting with it, and a mock implementation of the interface for testing. It does not include support for immediate data removal since that won't be needed for the currently planned use case. BUG=605149 Review-Url: https://codereview.chromium.org/2943143002 Cr-Commit-Position: refs/heads/master@{#481015} CWE ID:
void HostCache::Set(const Key& key, const Entry& entry, base::TimeTicks now, base::TimeDelta ttl) { TRACE_EVENT0(kNetTracingCategory, "HostCache::Set"); DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); if (caching_is_disabled()) return; bool result_changed = false; auto it = entries_.find(key); if (it != entries_.end()) { bool is_stale = it->second.IsStale(now, network_changes_); AddressListDeltaType delta = FindAddressListDeltaType(it->second.addresses(), entry.addresses()); RecordSet(is_stale ? SET_UPDATE_STALE : SET_UPDATE_VALID, now, &it->second, entry, delta); result_changed = entry.error() == OK && (it->second.error() != entry.error() || delta != DELTA_IDENTICAL); entries_.erase(it); } else { result_changed = true; if (size() == max_entries_) EvictOneEntry(now); RecordSet(SET_INSERT, now, nullptr, entry, DELTA_DISJOINT); } AddEntry(Key(key), Entry(entry, now, ttl, network_changes_)); if (delegate_ && result_changed) delegate_->ScheduleWrite(); }
172,009
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: SplashError Splash::fillImageMask(SplashImageMaskSource src, void *srcData, int w, int h, SplashCoord *mat, GBool glyphMode) { SplashPipe pipe; GBool rot; SplashCoord xScale, yScale, xShear, yShear, yShear1; int tx, tx2, ty, ty2, scaledWidth, scaledHeight, xSign, ySign; int ulx, uly, llx, lly, urx, ury, lrx, lry; int ulx1, uly1, llx1, lly1, urx1, ury1, lrx1, lry1; int xMin, xMax, yMin, yMax; SplashClipResult clipRes, clipRes2; int yp, yq, yt, yStep, lastYStep; int xp, xq, xt, xStep, xSrc; int k1, spanXMin, spanXMax, spanY; SplashColorPtr pixBuf, p; int pixAcc; int x, y, x1, x2, y2; SplashCoord y1; int n, m, i, j; if (debugMode) { printf("fillImageMask: w=%d h=%d mat=[%.2f %.2f %.2f %.2f %.2f %.2f]\n", w, h, (double)mat[0], (double)mat[1], (double)mat[2], (double)mat[3], (double)mat[4], (double)mat[5]); } if (w == 0 && h == 0) return splashErrZeroImage; if (splashAbs(mat[0] * mat[3] - mat[1] * mat[2]) < 0.000001) { return splashErrSingularMatrix; } rot = splashAbs(mat[1]) > splashAbs(mat[0]); if (rot) { xScale = -mat[1]; yScale = mat[2] - (mat[0] * mat[3]) / mat[1]; xShear = -mat[3] / yScale; yShear = -mat[0] / mat[1]; } else { xScale = mat[0]; yScale = mat[3] - (mat[1] * mat[2]) / mat[0]; xShear = mat[2] / yScale; yShear = mat[1] / mat[0]; } if (glyphMode) { if (xScale >= 0) { tx = splashRound(mat[4]); tx2 = splashRound(mat[4] + xScale) - 1; } else { tx = splashRound(mat[4]) - 1; tx2 = splashRound(mat[4] + xScale); } } else { if (xScale >= 0) { tx = splashFloor(mat[4] - 0.01); tx2 = splashFloor(mat[4] + xScale + 0.01); } else { tx = splashFloor(mat[4] + 0.01); tx2 = splashFloor(mat[4] + xScale - 0.01); } } scaledWidth = abs(tx2 - tx) + 1; if (glyphMode) { if (yScale >= 0) { ty = splashRound(mat[5]); ty2 = splashRound(mat[5] + yScale) - 1; } else { ty = splashRound(mat[5]) - 1; ty2 = splashRound(mat[5] + yScale); } } else { if (yScale >= 0) { ty = splashFloor(mat[5] - 0.01); ty2 = splashFloor(mat[5] + yScale + 0.01); } else { ty = splashFloor(mat[5] + 0.01); ty2 = splashFloor(mat[5] + yScale - 0.01); } } scaledHeight = abs(ty2 - ty) + 1; xSign = (xScale < 0) ? -1 : 1; ySign = (yScale < 0) ? -1 : 1; yShear1 = (SplashCoord)xSign * yShear; ulx1 = 0; uly1 = 0; urx1 = xSign * (scaledWidth - 1); ury1 = (int)(yShear * urx1); llx1 = splashRound(xShear * ySign * (scaledHeight - 1)); lly1 = ySign * (scaledHeight - 1) + (int)(yShear * llx1); lrx1 = xSign * (scaledWidth - 1) + splashRound(xShear * ySign * (scaledHeight - 1)); lry1 = ySign * (scaledHeight - 1) + (int)(yShear * lrx1); if (rot) { ulx = tx + uly1; uly = ty - ulx1; urx = tx + ury1; ury = ty - urx1; llx = tx + lly1; lly = ty - llx1; lrx = tx + lry1; lry = ty - lrx1; } else { ulx = tx + ulx1; uly = ty + uly1; urx = tx + urx1; ury = ty + ury1; llx = tx + llx1; lly = ty + lly1; lrx = tx + lrx1; lry = ty + lry1; } xMin = (ulx < urx) ? (ulx < llx) ? (ulx < lrx) ? ulx : lrx : (llx < lrx) ? llx : lrx : (urx < llx) ? (urx < lrx) ? urx : lrx : (llx < lrx) ? llx : lrx; xMax = (ulx > urx) ? (ulx > llx) ? (ulx > lrx) ? ulx : lrx : (llx > lrx) ? llx : lrx : (urx > llx) ? (urx > lrx) ? urx : lrx : (llx > lrx) ? llx : lrx; yMin = (uly < ury) ? (uly < lly) ? (uly < lry) ? uly : lry : (lly < lry) ? lly : lry : (ury < lly) ? (ury < lry) ? ury : lry : (lly < lry) ? lly : lry; yMax = (uly > ury) ? (uly > lly) ? (uly > lry) ? uly : lry : (lly > lry) ? lly : lry : (ury > lly) ? (ury > lry) ? ury : lry : (lly > lry) ? 
lly : lry; clipRes = state->clip->testRect(xMin, yMin, xMax, yMax); opClipRes = clipRes; yp = h / scaledHeight; yq = h % scaledHeight; xp = w / scaledWidth; xq = w % scaledWidth; pixBuf = (SplashColorPtr)gmalloc((yp + 1) * w); pipeInit(&pipe, 0, 0, state->fillPattern, NULL, state->fillAlpha, gTrue, gFalse); if (vectorAntialias) { drawAAPixelInit(); } yt = 0; lastYStep = 1; for (y = 0; y < scaledHeight; ++y) { yStep = yp; yt += yq; if (yt >= scaledHeight) { yt -= scaledHeight; ++yStep; } n = (yp > 0) ? yStep : lastYStep; if (n > 0) { p = pixBuf; for (i = 0; i < n; ++i) { (*src)(srcData, p); p += w; } } lastYStep = yStep; k1 = splashRound(xShear * ySign * y); if (clipRes != splashClipAllInside && !rot && (int)(yShear * k1) == (int)(yShear * (xSign * (scaledWidth - 1) + k1))) { if (xSign > 0) { spanXMin = tx + k1; spanXMax = spanXMin + (scaledWidth - 1); } else { spanXMax = tx + k1; spanXMin = spanXMax - (scaledWidth - 1); } spanY = ty + ySign * y + (int)(yShear * k1); clipRes2 = state->clip->testSpan(spanXMin, spanXMax, spanY); if (clipRes2 == splashClipAllOutside) { continue; } } else { clipRes2 = clipRes; } xt = 0; xSrc = 0; x1 = k1; y1 = (SplashCoord)ySign * y + yShear * x1; if (yShear1 < 0) { y1 += 0.999; } n = yStep > 0 ? yStep : 1; for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; p = pixBuf + xSrc; pixAcc = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc += *p++; } p += w - m; } if (pixAcc != 0) { pipe.shape = (pixAcc == n * m) ? (SplashCoord)1 : (SplashCoord)pixAcc / (SplashCoord)(n * m); if (vectorAntialias && clipRes2 != splashClipAllInside) { drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } } xSrc += xStep; x1 += xSign; y1 += yShear1; } } gfree(pixBuf); return splashOk; } Commit Message: CWE ID: CWE-189
SplashError Splash::fillImageMask(SplashImageMaskSource src, void *srcData, int w, int h, SplashCoord *mat, GBool glyphMode) { SplashPipe pipe; GBool rot; SplashCoord xScale, yScale, xShear, yShear, yShear1; int tx, tx2, ty, ty2, scaledWidth, scaledHeight, xSign, ySign; int ulx, uly, llx, lly, urx, ury, lrx, lry; int ulx1, uly1, llx1, lly1, urx1, ury1, lrx1, lry1; int xMin, xMax, yMin, yMax; SplashClipResult clipRes, clipRes2; int yp, yq, yt, yStep, lastYStep; int xp, xq, xt, xStep, xSrc; int k1, spanXMin, spanXMax, spanY; SplashColorPtr pixBuf, p; int pixAcc; int x, y, x1, x2, y2; SplashCoord y1; int n, m, i, j; if (debugMode) { printf("fillImageMask: w=%d h=%d mat=[%.2f %.2f %.2f %.2f %.2f %.2f]\n", w, h, (double)mat[0], (double)mat[1], (double)mat[2], (double)mat[3], (double)mat[4], (double)mat[5]); } if (w == 0 && h == 0) return splashErrZeroImage; if (splashAbs(mat[0] * mat[3] - mat[1] * mat[2]) < 0.000001) { return splashErrSingularMatrix; } rot = splashAbs(mat[1]) > splashAbs(mat[0]); if (rot) { xScale = -mat[1]; yScale = mat[2] - (mat[0] * mat[3]) / mat[1]; xShear = -mat[3] / yScale; yShear = -mat[0] / mat[1]; } else { xScale = mat[0]; yScale = mat[3] - (mat[1] * mat[2]) / mat[0]; xShear = mat[2] / yScale; yShear = mat[1] / mat[0]; } if (glyphMode) { if (xScale >= 0) { tx = splashRound(mat[4]); tx2 = splashRound(mat[4] + xScale) - 1; } else { tx = splashRound(mat[4]) - 1; tx2 = splashRound(mat[4] + xScale); } } else { if (xScale >= 0) { tx = splashFloor(mat[4] - 0.01); tx2 = splashFloor(mat[4] + xScale + 0.01); } else { tx = splashFloor(mat[4] + 0.01); tx2 = splashFloor(mat[4] + xScale - 0.01); } } scaledWidth = abs(tx2 - tx) + 1; if (glyphMode) { if (yScale >= 0) { ty = splashRound(mat[5]); ty2 = splashRound(mat[5] + yScale) - 1; } else { ty = splashRound(mat[5]) - 1; ty2 = splashRound(mat[5] + yScale); } } else { if (yScale >= 0) { ty = splashFloor(mat[5] - 0.01); ty2 = splashFloor(mat[5] + yScale + 0.01); } else { ty = splashFloor(mat[5] + 0.01); ty2 = splashFloor(mat[5] + yScale - 0.01); } } scaledHeight = abs(ty2 - ty) + 1; xSign = (xScale < 0) ? -1 : 1; ySign = (yScale < 0) ? -1 : 1; yShear1 = (SplashCoord)xSign * yShear; ulx1 = 0; uly1 = 0; urx1 = xSign * (scaledWidth - 1); ury1 = (int)(yShear * urx1); llx1 = splashRound(xShear * ySign * (scaledHeight - 1)); lly1 = ySign * (scaledHeight - 1) + (int)(yShear * llx1); lrx1 = xSign * (scaledWidth - 1) + splashRound(xShear * ySign * (scaledHeight - 1)); lry1 = ySign * (scaledHeight - 1) + (int)(yShear * lrx1); if (rot) { ulx = tx + uly1; uly = ty - ulx1; urx = tx + ury1; ury = ty - urx1; llx = tx + lly1; lly = ty - llx1; lrx = tx + lry1; lry = ty - lrx1; } else { ulx = tx + ulx1; uly = ty + uly1; urx = tx + urx1; ury = ty + ury1; llx = tx + llx1; lly = ty + lly1; lrx = tx + lrx1; lry = ty + lry1; } xMin = (ulx < urx) ? (ulx < llx) ? (ulx < lrx) ? ulx : lrx : (llx < lrx) ? llx : lrx : (urx < llx) ? (urx < lrx) ? urx : lrx : (llx < lrx) ? llx : lrx; xMax = (ulx > urx) ? (ulx > llx) ? (ulx > lrx) ? ulx : lrx : (llx > lrx) ? llx : lrx : (urx > llx) ? (urx > lrx) ? urx : lrx : (llx > lrx) ? llx : lrx; yMin = (uly < ury) ? (uly < lly) ? (uly < lry) ? uly : lry : (lly < lry) ? lly : lry : (ury < lly) ? (ury < lry) ? ury : lry : (lly < lry) ? lly : lry; yMax = (uly > ury) ? (uly > lly) ? (uly > lry) ? uly : lry : (lly > lry) ? lly : lry : (ury > lly) ? (ury > lry) ? ury : lry : (lly > lry) ? 
lly : lry; clipRes = state->clip->testRect(xMin, yMin, xMax, yMax); opClipRes = clipRes; yp = h / scaledHeight; yq = h % scaledHeight; xp = w / scaledWidth; xq = w % scaledWidth; pixBuf = (SplashColorPtr)gmallocn((yp + 1), w); pipeInit(&pipe, 0, 0, state->fillPattern, NULL, state->fillAlpha, gTrue, gFalse); if (vectorAntialias) { drawAAPixelInit(); } yt = 0; lastYStep = 1; for (y = 0; y < scaledHeight; ++y) { yStep = yp; yt += yq; if (yt >= scaledHeight) { yt -= scaledHeight; ++yStep; } n = (yp > 0) ? yStep : lastYStep; if (n > 0) { p = pixBuf; for (i = 0; i < n; ++i) { (*src)(srcData, p); p += w; } } lastYStep = yStep; k1 = splashRound(xShear * ySign * y); if (clipRes != splashClipAllInside && !rot && (int)(yShear * k1) == (int)(yShear * (xSign * (scaledWidth - 1) + k1))) { if (xSign > 0) { spanXMin = tx + k1; spanXMax = spanXMin + (scaledWidth - 1); } else { spanXMax = tx + k1; spanXMin = spanXMax - (scaledWidth - 1); } spanY = ty + ySign * y + (int)(yShear * k1); clipRes2 = state->clip->testSpan(spanXMin, spanXMax, spanY); if (clipRes2 == splashClipAllOutside) { continue; } } else { clipRes2 = clipRes; } xt = 0; xSrc = 0; x1 = k1; y1 = (SplashCoord)ySign * y + yShear * x1; if (yShear1 < 0) { y1 += 0.999; } n = yStep > 0 ? yStep : 1; for (x = 0; x < scaledWidth; ++x) { xStep = xp; xt += xq; if (xt >= scaledWidth) { xt -= scaledWidth; ++xStep; } if (rot) { x2 = (int)y1; y2 = -x1; } else { x2 = x1; y2 = (int)y1; } m = xStep > 0 ? xStep : 1; p = pixBuf + xSrc; pixAcc = 0; for (i = 0; i < n; ++i) { for (j = 0; j < m; ++j) { pixAcc += *p++; } p += w - m; } if (pixAcc != 0) { pipe.shape = (pixAcc == n * m) ? (SplashCoord)1 : (SplashCoord)pixAcc / (SplashCoord)(n * m); if (vectorAntialias && clipRes2 != splashClipAllInside) { drawAAPixel(&pipe, tx + x2, ty + y2); } else { drawPixel(&pipe, tx + x2, ty + y2, clipRes2 == splashClipAllInside); } } xSrc += xStep; x1 += xSign; y1 += yShear1; } } gfree(pixBuf); return splashOk; }
164,619
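The fix in the record above replaces gmalloc((yp + 1) * w) with gmallocn(yp + 1, w): the count and element size are passed separately so the allocator can refuse a product that would wrap (CWE-189) instead of handing back a short buffer. A minimal stand-alone C sketch of that pattern, assuming nothing about the real Goo allocator (checked_mallocn below is a hypothetical stand-in, not the xpdf/poppler function):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for gmallocn: refuse the request if
     * nmemb * size would overflow size_t instead of wrapping. */
    static void *checked_mallocn(size_t nmemb, size_t size)
    {
        if (size != 0 && nmemb > SIZE_MAX / size)
            return NULL;                 /* product would wrap */
        return malloc(nmemb * size);
    }

    int main(void)
    {
        /* With gmalloc((yp + 1) * w) a wrapped product yields a short buffer
         * that the row-by-row image writes then overrun. */
        void *p = checked_mallocn(SIZE_MAX / 2, 16);   /* deliberately oversized */
        printf("%s\n", p ? "allocated" : "rejected oversized request");
        free(p);
        return 0;
    }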
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: xmalloc (size_t size) { void *ptr = malloc (size); if (!ptr && (size != 0)) /* some libc don't like size == 0 */ { perror ("xmalloc: Memory allocation failure"); abort(); } return ptr; } Commit Message: Fix integer overflows and harden memory allocator. CWE ID: CWE-190
xmalloc (size_t num, size_t size) { size_t res; if (check_mul_overflow(num, size, &res)) abort(); void *ptr = malloc (res); if (!ptr && (size != 0)) /* some libc don't like size == 0 */ { perror ("xmalloc: Memory allocation failure"); abort(); } return ptr; }
168,359
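The patched xmalloc above aborts when num * size overflows before calling malloc. check_mul_overflow is not a standard libc function; on GCC and Clang the same guard can be written with the __builtin_mul_overflow builtin, as in this sketch:

    #include <stdio.h>
    #include <stdlib.h>

    /* Overflow-checked multiply: returns nonzero when a * b does not fit
     * in size_t. GCC >= 5 and Clang provide the builtin used here. */
    static int mul_overflows(size_t a, size_t b, size_t *out)
    {
        return __builtin_mul_overflow(a, b, out);
    }

    int main(void)
    {
        size_t half_bits = sizeof(size_t) * 4;      /* half the bit width of size_t */
        size_t total;
        if (mul_overflows((size_t)1 << half_bits, (size_t)1 << half_bits, &total)) {
            puts("num * size overflows: refuse to allocate");
            return 1;
        }
        printf("safe to allocate %zu bytes\n", total);
        return 0;
    }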
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: Platform::IntPoint InRegionScrollableArea::calculateMaximumScrollPosition(const Platform::IntSize& viewportSize, const Platform::IntSize& contentsSize, float overscrollLimitFactor) const { ASSERT(!allowsOverscroll()); return Platform::IntPoint(std::max(contentsSize.width() - viewportSize.width(), 0) + overscrollLimitFactor, std::max(contentsSize.height() - viewportSize.height(), 0) + overscrollLimitFactor); } Commit Message: Remove minimum and maximum scroll position as they are no longer required due to changes in ScrollViewBase. https://bugs.webkit.org/show_bug.cgi?id=87298 Patch by Genevieve Mak <[email protected]> on 2012-05-23 Reviewed by Antonio Gomes. * WebKitSupport/InRegionScrollableArea.cpp: (BlackBerry::WebKit::InRegionScrollableArea::InRegionScrollableArea): * WebKitSupport/InRegionScrollableArea.h: (InRegionScrollableArea): git-svn-id: svn://svn.chromium.org/blink/trunk@118233 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-119
Platform::IntPoint InRegionScrollableArea::calculateMaximumScrollPosition(const Platform::IntSize& viewportSize, const Platform::IntSize& contentsSize, float overscrollLimitFactor) const
170,432
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: load_fake(png_charp param, png_bytepp profile) { char *endptr = NULL; unsigned long long int size = strtoull(param, &endptr, 0/*base*/); /* The 'fake' format is <number>*[string] */ if (endptr != NULL && *endptr == '*') { size_t len = strlen(++endptr); size_t result = (size_t)size; if (len == 0) len = 1; /* capture the terminating '\0' */ /* Now repeat that string to fill 'size' bytes. */ if (result == size && (*profile = malloc(result)) != NULL) { png_bytep out = *profile; if (len == 1) memset(out, *endptr, result); else { while (size >= len) { memcpy(out, endptr, len); out += len; size -= len; } memcpy(out, endptr, size); } return result; } else { fprintf(stderr, "%s: size exceeds system limits\n", param); exit(1); } } return 0; } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
load_fake(png_charp param, png_bytepp profile) { char *endptr = NULL; uint64_t size = strtoull(param, &endptr, 0/*base*/); /* The 'fake' format is <number>*[string] */ if (endptr != NULL && *endptr == '*') { size_t len = strlen(++endptr); size_t result = (size_t)size; if (len == 0) len = 1; /* capture the terminating '\0' */ /* Now repeat that string to fill 'size' bytes. */ if (result == size && (*profile = malloc(result)) != NULL) { png_bytep out = *profile; if (len == 1) memset(out, *endptr, result); else { while (size >= len) { memcpy(out, endptr, len); out += len; size -= len; } memcpy(out, endptr, size); } return result; } else { fprintf(stderr, "%s: size exceeds system limits\n", param); exit(1); } } return 0; }
173,583
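The fix above only changes the parsed value's type from unsigned long long int to uint64_t; the guard that actually matters is the existing result == size comparison, which detects the 64-bit count being truncated when narrowed to size_t. A small C sketch of that narrowing check, making no assumption about the surrounding libpng tool:

    #include <stdint.h>
    #include <stdio.h>

    /* Narrow a 64-bit count to size_t, failing if anything is lost. */
    static int fits_in_size_t(uint64_t value, size_t *out)
    {
        size_t narrowed = (size_t)value;
        if ((uint64_t)narrowed != value)
            return 0;                    /* value exceeds SIZE_MAX: truncated */
        *out = narrowed;
        return 1;
    }

    int main(void)
    {
        size_t sz;
        uint64_t requested = UINT64_C(1) << 40;     /* 1 TiB: fails on 32-bit size_t */
        if (!fits_in_size_t(requested, &sz))
            puts("size exceeds system limits");
        else
            printf("ok: %zu bytes requested\n", sz);
        return 0;
    }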
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void cstm(JF, js_Ast *stm) { js_Ast *target; int loop, cont, then, end; emitline(J, F, stm); switch (stm->type) { case AST_FUNDEC: break; case STM_BLOCK: cstmlist(J, F, stm->a); break; case STM_EMPTY: if (F->script) { emit(J, F, OP_POP); emit(J, F, OP_UNDEF); } break; case STM_VAR: cvarinit(J, F, stm->a); break; case STM_IF: if (stm->c) { cexp(J, F, stm->a); then = emitjump(J, F, OP_JTRUE); cstm(J, F, stm->c); end = emitjump(J, F, OP_JUMP); label(J, F, then); cstm(J, F, stm->b); label(J, F, end); } else { cexp(J, F, stm->a); end = emitjump(J, F, OP_JFALSE); cstm(J, F, stm->b); label(J, F, end); } break; case STM_DO: loop = here(J, F); cstm(J, F, stm->a); cont = here(J, F); cexp(J, F, stm->b); emitjumpto(J, F, OP_JTRUE, loop); labeljumps(J, F, stm->jumps, here(J,F), cont); break; case STM_WHILE: loop = here(J, F); cexp(J, F, stm->a); end = emitjump(J, F, OP_JFALSE); cstm(J, F, stm->b); emitjumpto(J, F, OP_JUMP, loop); label(J, F, end); labeljumps(J, F, stm->jumps, here(J,F), loop); break; case STM_FOR: case STM_FOR_VAR: if (stm->type == STM_FOR_VAR) { cvarinit(J, F, stm->a); } else { if (stm->a) { cexp(J, F, stm->a); emit(J, F, OP_POP); } } loop = here(J, F); if (stm->b) { cexp(J, F, stm->b); end = emitjump(J, F, OP_JFALSE); } else { end = 0; } cstm(J, F, stm->d); cont = here(J, F); if (stm->c) { cexp(J, F, stm->c); emit(J, F, OP_POP); } emitjumpto(J, F, OP_JUMP, loop); if (end) label(J, F, end); labeljumps(J, F, stm->jumps, here(J,F), cont); break; case STM_FOR_IN: case STM_FOR_IN_VAR: cexp(J, F, stm->b); emit(J, F, OP_ITERATOR); loop = here(J, F); { emit(J, F, OP_NEXTITER); end = emitjump(J, F, OP_JFALSE); cassignforin(J, F, stm); if (F->script) { emit(J, F, OP_ROT2); cstm(J, F, stm->c); emit(J, F, OP_ROT2); } else { cstm(J, F, stm->c); } emitjumpto(J, F, OP_JUMP, loop); } label(J, F, end); labeljumps(J, F, stm->jumps, here(J,F), loop); break; case STM_SWITCH: cswitch(J, F, stm->a, stm->b); labeljumps(J, F, stm->jumps, here(J,F), 0); break; case STM_LABEL: cstm(J, F, stm->b); /* skip consecutive labels */ while (stm->type == STM_LABEL) stm = stm->b; /* loops and switches have already been labelled */ if (!isloop(stm->type) && stm->type != STM_SWITCH) labeljumps(J, F, stm->jumps, here(J,F), 0); break; case STM_BREAK: if (stm->a) { target = breaktarget(J, F, stm, stm->a->string); if (!target) jsC_error(J, stm, "break label '%s' not found", stm->a->string); } else { target = breaktarget(J, F, stm, NULL); if (!target) jsC_error(J, stm, "unlabelled break must be inside loop or switch"); } cexit(J, F, STM_BREAK, stm, target); addjump(J, F, STM_BREAK, target, emitjump(J, F, OP_JUMP)); break; case STM_CONTINUE: if (stm->a) { target = continuetarget(J, F, stm, stm->a->string); if (!target) jsC_error(J, stm, "continue label '%s' not found", stm->a->string); } else { target = continuetarget(J, F, stm, NULL); if (!target) jsC_error(J, stm, "continue must be inside loop"); } cexit(J, F, STM_CONTINUE, stm, target); addjump(J, F, STM_CONTINUE, target, emitjump(J, F, OP_JUMP)); break; case STM_RETURN: if (stm->a) cexp(J, F, stm->a); else emit(J, F, OP_UNDEF); target = returntarget(J, F, stm); if (!target) jsC_error(J, stm, "return not in function"); cexit(J, F, STM_RETURN, stm, target); emit(J, F, OP_RETURN); break; case STM_THROW: cexp(J, F, stm->a); emit(J, F, OP_THROW); break; case STM_WITH: cexp(J, F, stm->a); emit(J, F, OP_WITH); cstm(J, F, stm->b); emit(J, F, OP_ENDWITH); break; case STM_TRY: if (stm->b && stm->c) { if (stm->d) ctrycatchfinally(J, F, stm->a, stm->b, stm->c, stm->d); 
else ctrycatch(J, F, stm->a, stm->b, stm->c); } else { ctryfinally(J, F, stm->a, stm->d); } break; case STM_DEBUGGER: emit(J, F, OP_DEBUGGER); break; default: if (F->script) { emit(J, F, OP_POP); cexp(J, F, stm); } else { cexp(J, F, stm); emit(J, F, OP_POP); } break; } } Commit Message: CWE ID: CWE-476
static void cstm(JF, js_Ast *stm) { js_Ast *target; int loop, cont, then, end; emitline(J, F, stm); switch (stm->type) { case AST_FUNDEC: break; case STM_BLOCK: cstmlist(J, F, stm->a); break; case STM_EMPTY: if (F->script) { emit(J, F, OP_POP); emit(J, F, OP_UNDEF); } break; case STM_VAR: cvarinit(J, F, stm->a); break; case STM_IF: if (stm->c) { cexp(J, F, stm->a); then = emitjump(J, F, OP_JTRUE); cstm(J, F, stm->c); end = emitjump(J, F, OP_JUMP); label(J, F, then); cstm(J, F, stm->b); label(J, F, end); } else { cexp(J, F, stm->a); end = emitjump(J, F, OP_JFALSE); cstm(J, F, stm->b); label(J, F, end); } break; case STM_DO: loop = here(J, F); cstm(J, F, stm->a); cont = here(J, F); cexp(J, F, stm->b); emitjumpto(J, F, OP_JTRUE, loop); labeljumps(J, F, stm->jumps, here(J,F), cont); break; case STM_WHILE: loop = here(J, F); cexp(J, F, stm->a); end = emitjump(J, F, OP_JFALSE); cstm(J, F, stm->b); emitjumpto(J, F, OP_JUMP, loop); label(J, F, end); labeljumps(J, F, stm->jumps, here(J,F), loop); break; case STM_FOR: case STM_FOR_VAR: if (stm->type == STM_FOR_VAR) { cvarinit(J, F, stm->a); } else { if (stm->a) { cexp(J, F, stm->a); emit(J, F, OP_POP); } } loop = here(J, F); if (stm->b) { cexp(J, F, stm->b); end = emitjump(J, F, OP_JFALSE); } else { end = 0; } cstm(J, F, stm->d); cont = here(J, F); if (stm->c) { cexp(J, F, stm->c); emit(J, F, OP_POP); } emitjumpto(J, F, OP_JUMP, loop); if (end) label(J, F, end); labeljumps(J, F, stm->jumps, here(J,F), cont); break; case STM_FOR_IN: case STM_FOR_IN_VAR: cexp(J, F, stm->b); emit(J, F, OP_ITERATOR); loop = here(J, F); { emit(J, F, OP_NEXTITER); end = emitjump(J, F, OP_JFALSE); cassignforin(J, F, stm); if (F->script) { emit(J, F, OP_ROT2); cstm(J, F, stm->c); emit(J, F, OP_ROT2); } else { cstm(J, F, stm->c); } emitjumpto(J, F, OP_JUMP, loop); } label(J, F, end); labeljumps(J, F, stm->jumps, here(J,F), loop); break; case STM_SWITCH: cswitch(J, F, stm->a, stm->b); labeljumps(J, F, stm->jumps, here(J,F), 0); break; case STM_LABEL: cstm(J, F, stm->b); /* skip consecutive labels */ while (stm->type == STM_LABEL) stm = stm->b; /* loops and switches have already been labelled */ if (!isloop(stm->type) && stm->type != STM_SWITCH) labeljumps(J, F, stm->jumps, here(J,F), 0); break; case STM_BREAK: if (stm->a) { target = breaktarget(J, F, stm->parent, stm->a->string); if (!target) jsC_error(J, stm, "break label '%s' not found", stm->a->string); } else { target = breaktarget(J, F, stm->parent, NULL); if (!target) jsC_error(J, stm, "unlabelled break must be inside loop or switch"); } cexit(J, F, STM_BREAK, stm, target); addjump(J, F, STM_BREAK, target, emitjump(J, F, OP_JUMP)); break; case STM_CONTINUE: if (stm->a) { target = continuetarget(J, F, stm->parent, stm->a->string); if (!target) jsC_error(J, stm, "continue label '%s' not found", stm->a->string); } else { target = continuetarget(J, F, stm->parent, NULL); if (!target) jsC_error(J, stm, "continue must be inside loop"); } cexit(J, F, STM_CONTINUE, stm, target); addjump(J, F, STM_CONTINUE, target, emitjump(J, F, OP_JUMP)); break; case STM_RETURN: if (stm->a) cexp(J, F, stm->a); else emit(J, F, OP_UNDEF); target = returntarget(J, F, stm->parent); if (!target) jsC_error(J, stm, "return not in function"); cexit(J, F, STM_RETURN, stm, target); emit(J, F, OP_RETURN); break; case STM_THROW: cexp(J, F, stm->a); emit(J, F, OP_THROW); break; case STM_WITH: cexp(J, F, stm->a); emit(J, F, OP_WITH); cstm(J, F, stm->b); emit(J, F, OP_ENDWITH); break; case STM_TRY: if (stm->b && stm->c) { if (stm->d) ctrycatchfinally(J, F, 
stm->a, stm->b, stm->c, stm->d); else ctrycatch(J, F, stm->a, stm->b, stm->c); } else { ctryfinally(J, F, stm->a, stm->d); } break; case STM_DEBUGGER: emit(J, F, OP_DEBUGGER); break; default: if (F->script) { emit(J, F, OP_POP); cexp(J, F, stm); } else { cexp(J, F, stm); emit(J, F, OP_POP); } break; } }
164,901
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int git_delta_apply( void **out, size_t *out_len, const unsigned char *base, size_t base_len, const unsigned char *delta, size_t delta_len) { const unsigned char *delta_end = delta + delta_len; size_t base_sz, res_sz, alloc_sz; unsigned char *res_dp; *out = NULL; *out_len = 0; /* Check that the base size matches the data we were given; * if not we would underflow while accessing data from the * base object, resulting in data corruption or segfault. */ if ((hdr_sz(&base_sz, &delta, delta_end) < 0) || (base_sz != base_len)) { giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data"); return -1; } if (hdr_sz(&res_sz, &delta, delta_end) < 0) { giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data"); return -1; } GITERR_CHECK_ALLOC_ADD(&alloc_sz, res_sz, 1); res_dp = git__malloc(alloc_sz); GITERR_CHECK_ALLOC(res_dp); res_dp[res_sz] = '\0'; *out = res_dp; *out_len = res_sz; while (delta < delta_end) { unsigned char cmd = *delta++; if (cmd & 0x80) { /* cmd is a copy instruction; copy from the base. */ size_t off = 0, len = 0; if (cmd & 0x01) off = *delta++; if (cmd & 0x02) off |= *delta++ << 8UL; if (cmd & 0x04) off |= *delta++ << 16UL; if (cmd & 0x08) off |= *delta++ << 24UL; if (cmd & 0x10) len = *delta++; if (cmd & 0x20) len |= *delta++ << 8UL; if (cmd & 0x40) len |= *delta++ << 16UL; if (!len) len = 0x10000; if (base_len < off + len || res_sz < len) goto fail; memcpy(res_dp, base + off, len); res_dp += len; res_sz -= len; } else if (cmd) { /* cmd is a literal insert instruction; copy from * the delta stream itself. */ if (delta_end - delta < cmd || res_sz < cmd) goto fail; memcpy(res_dp, delta, cmd); delta += cmd; res_dp += cmd; res_sz -= cmd; } else { /* cmd == 0 is reserved for future encodings. */ goto fail; } } if (delta != delta_end || res_sz) goto fail; return 0; fail: git__free(*out); *out = NULL; *out_len = 0; giterr_set(GITERR_INVALID, "failed to apply delta"); return -1; } Commit Message: delta: fix sign-extension of big left-shift Our delta code was originally adapted from JGit, which itself adapted it from git itself. Due to this heritage, we inherited a bug from git.git in how we compute the delta offset, which was fixed upstream in 48fb7deb5 (Fix big left-shifts of unsigned char, 2009-06-17). As explained by Linus: Shifting 'unsigned char' or 'unsigned short' left can result in sign extension errors, since the C integer promotion rules means that the unsigned char/short will get implicitly promoted to a signed 'int' due to the shift (or due to other operations). This normally doesn't matter, but if you shift things up sufficiently, it will now set the sign bit in 'int', and a subsequent cast to a bigger type (eg 'long' or 'unsigned long') will now sign-extend the value despite the original expression being unsigned. One example of this would be something like unsigned long size; unsigned char c; size += c << 24; where despite all the variables being unsigned, 'c << 24' ends up being a signed entity, and will get sign-extended when then doing the addition in an 'unsigned long' type. Since git uses 'unsigned char' pointers extensively, we actually have this bug in a couple of places. In our delta code, we inherited such a bogus shift when computing the offset at which the delta base is to be found. Due to the sign extension we can end up with an offset where all the bits are set. This can allow an arbitrary memory read, as the addition in `base_len < off + len` can now overflow if `off` has all its bits set. 
Fix the issue by casting the result of `*delta++ << 24UL` to an unsigned integer again. Add a test with a crafted delta that would actually succeed with an out-of-bounds read in case where the cast wouldn't exist. Reported-by: Riccardo Schirone <[email protected]> Test-provided-by: Riccardo Schirone <[email protected]> CWE ID: CWE-125
int git_delta_apply( void **out, size_t *out_len, const unsigned char *base, size_t base_len, const unsigned char *delta, size_t delta_len) { const unsigned char *delta_end = delta + delta_len; size_t base_sz, res_sz, alloc_sz; unsigned char *res_dp; *out = NULL; *out_len = 0; /* * Check that the base size matches the data we were given; * if not we would underflow while accessing data from the * base object, resulting in data corruption or segfault. */ if ((hdr_sz(&base_sz, &delta, delta_end) < 0) || (base_sz != base_len)) { giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data"); return -1; } if (hdr_sz(&res_sz, &delta, delta_end) < 0) { giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data"); return -1; } GITERR_CHECK_ALLOC_ADD(&alloc_sz, res_sz, 1); res_dp = git__malloc(alloc_sz); GITERR_CHECK_ALLOC(res_dp); res_dp[res_sz] = '\0'; *out = res_dp; *out_len = res_sz; while (delta < delta_end) { unsigned char cmd = *delta++; if (cmd & 0x80) { /* cmd is a copy instruction; copy from the base. */ size_t off = 0, len = 0; if (cmd & 0x01) off = *delta++; if (cmd & 0x02) off |= *delta++ << 8UL; if (cmd & 0x04) off |= *delta++ << 16UL; if (cmd & 0x08) off |= ((unsigned) *delta++ << 24UL); if (cmd & 0x10) len = *delta++; if (cmd & 0x20) len |= *delta++ << 8UL; if (cmd & 0x40) len |= *delta++ << 16UL; if (!len) len = 0x10000; if (base_len < off + len || res_sz < len) goto fail; memcpy(res_dp, base + off, len); res_dp += len; res_sz -= len; } else if (cmd) { /* * cmd is a literal insert instruction; copy from * the delta stream itself. */ if (delta_end - delta < cmd || res_sz < cmd) goto fail; memcpy(res_dp, delta, cmd); delta += cmd; res_dp += cmd; res_sz -= cmd; } else { /* cmd == 0 is reserved for future encodings. */ goto fail; } } if (delta != delta_end || res_sz) goto fail; return 0; fail: git__free(*out); *out = NULL; *out_len = 0; giterr_set(GITERR_INVALID, "failed to apply delta"); return -1; }
170,168
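The commit message above explains the root cause precisely; a compact C demonstration of the promotion it describes may still help. Shifting a value into the sign bit of int is technically undefined behaviour, which is exactly why the fix casts to unsigned before the shift; the outputs noted in the comments assume a typical two's-complement 64-bit build.

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char c = 0xFF;
        size_t bad = 0, good = 0;

        /* c is promoted to (signed) int, so c << 24 lands in the sign bit and the
         * widening to size_t sign-extends, setting every high bit of the offset. */
        bad |= c << 24UL;
        /* The patched code casts first, so the shift stays in unsigned arithmetic. */
        good |= (unsigned) c << 24UL;

        printf("without cast: %zx\n", bad);   /* ffffffffff000000 on a 64-bit build */
        printf("with cast:    %zx\n", good);  /* ff000000 */
        return 0;
    }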
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int __ptrace_may_access(struct task_struct *task, unsigned int mode) { const struct cred *cred = current_cred(), *tcred; /* May we inspect the given task? * This check is used both for attaching with ptrace * and for allowing access to sensitive information in /proc. * * ptrace_attach denies several cases that /proc allows * because setting up the necessary parent/child relationship * or halting the specified task is impossible. */ int dumpable = 0; /* Don't let security modules deny introspection */ if (same_thread_group(task, current)) return 0; rcu_read_lock(); tcred = __task_cred(task); if (uid_eq(cred->uid, tcred->euid) && uid_eq(cred->uid, tcred->suid) && uid_eq(cred->uid, tcred->uid) && gid_eq(cred->gid, tcred->egid) && gid_eq(cred->gid, tcred->sgid) && gid_eq(cred->gid, tcred->gid)) goto ok; if (ptrace_has_cap(tcred->user_ns, mode)) goto ok; rcu_read_unlock(); return -EPERM; ok: rcu_read_unlock(); smp_rmb(); if (task->mm) dumpable = get_dumpable(task->mm); rcu_read_lock(); if (!dumpable && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) { rcu_read_unlock(); return -EPERM; } rcu_read_unlock(); return security_ptrace_access_check(task, mode); } Commit Message: exec/ptrace: fix get_dumpable() incorrect tests The get_dumpable() return value is not boolean. Most users of the function actually want to be testing for non-SUID_DUMP_USER(1) rather than SUID_DUMP_DISABLE(0). The SUID_DUMP_ROOT(2) is also considered a protected state. Almost all places did this correctly, excepting the two places fixed in this patch. Wrong logic: if (dumpable == SUID_DUMP_DISABLE) { /* be protective */ } or if (dumpable == 0) { /* be protective */ } or if (!dumpable) { /* be protective */ } Correct logic: if (dumpable != SUID_DUMP_USER) { /* be protective */ } or if (dumpable != 1) { /* be protective */ } Without this patch, if the system had set the sysctl fs/suid_dumpable=2, a user was able to ptrace attach to processes that had dropped privileges to that user. (This may have been partially mitigated if Yama was enabled.) The macros have been moved into the file that declares get/set_dumpable(), which means things like the ia64 code can see them too. CVE-2013-2929 Reported-by: Vasily Kulikov <[email protected]> Signed-off-by: Kees Cook <[email protected]> Cc: "Luck, Tony" <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: "Eric W. Biederman" <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-264
static int __ptrace_may_access(struct task_struct *task, unsigned int mode) { const struct cred *cred = current_cred(), *tcred; /* May we inspect the given task? * This check is used both for attaching with ptrace * and for allowing access to sensitive information in /proc. * * ptrace_attach denies several cases that /proc allows * because setting up the necessary parent/child relationship * or halting the specified task is impossible. */ int dumpable = 0; /* Don't let security modules deny introspection */ if (same_thread_group(task, current)) return 0; rcu_read_lock(); tcred = __task_cred(task); if (uid_eq(cred->uid, tcred->euid) && uid_eq(cred->uid, tcred->suid) && uid_eq(cred->uid, tcred->uid) && gid_eq(cred->gid, tcred->egid) && gid_eq(cred->gid, tcred->sgid) && gid_eq(cred->gid, tcred->gid)) goto ok; if (ptrace_has_cap(tcred->user_ns, mode)) goto ok; rcu_read_unlock(); return -EPERM; ok: rcu_read_unlock(); smp_rmb(); if (task->mm) dumpable = get_dumpable(task->mm); rcu_read_lock(); if (dumpable != SUID_DUMP_USER && !ptrace_has_cap(__task_cred(task)->user_ns, mode)) { rcu_read_unlock(); return -EPERM; } rcu_read_unlock(); return security_ptrace_access_check(task, mode); }
166,049
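The wrong-versus-correct logic is already spelled out in the commit message; the sketch below just makes the tri-state nature of get_dumpable() concrete. The enum values mirror the suid_dumpable states named there (0, 1, 2), but the two helper functions are illustrative, not kernel code.

    #include <stdio.h>

    enum { SUID_DUMP_DISABLE = 0, SUID_DUMP_USER = 1, SUID_DUMP_ROOT = 2 };

    /* Buggy test: treats every non-zero state, including SUID_DUMP_ROOT, as safe. */
    static int old_check_allows(int dumpable) { return dumpable != SUID_DUMP_DISABLE; }
    /* Fixed test: only the ordinary user-dumpable state grants access. */
    static int new_check_allows(int dumpable) { return dumpable == SUID_DUMP_USER; }

    int main(void)
    {
        int d = SUID_DUMP_ROOT;   /* fs/suid_dumpable=2: a protected state */
        printf("old logic grants ptrace-style access: %d\n", old_check_allows(d));  /* 1 */
        printf("new logic grants ptrace-style access: %d\n", new_check_allows(d));  /* 0 */
        return 0;
    }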
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int verify_vc_kbmode(int fd) { int curr_mode; /* * Make sure we only adjust consoles in K_XLATE or K_UNICODE mode. * Otherwise we would (likely) interfere with X11's processing of the * key events. * * http://lists.freedesktop.org/archives/systemd-devel/2013-February/008573.html */ if (ioctl(fd, KDGKBMODE, &curr_mode) < 0) return -errno; return IN_SET(curr_mode, K_XLATE, K_UNICODE) ? 0 : -EBUSY; } Commit Message: Merge pull request #12378 from rbalint/vt-kbd-reset-check VT kbd reset check CWE ID: CWE-255
static int verify_vc_kbmode(int fd) {
169,781
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: LocalSiteCharacteristicsWebContentsObserverTest() { scoped_feature_list_.InitAndEnableFeature( features::kSiteCharacteristicsDatabase); } Commit Message: Connect the LocalDB to TabManager. Bug: 773382 Change-Id: Iec8fe5226ee175105d51f300f30b4865478ac099 Reviewed-on: https://chromium-review.googlesource.com/1118611 Commit-Queue: Sébastien Marchand <[email protected]> Reviewed-by: François Doray <[email protected]> Cr-Commit-Position: refs/heads/master@{#572871} CWE ID:
LocalSiteCharacteristicsWebContentsObserverTest() {
172,217
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int ssl3_write_bytes(SSL *s, int type, const void *buf_, int len) { const unsigned char *buf = buf_; int tot; unsigned int n, split_send_fragment, maxpipes; #if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK unsigned int max_send_fragment, nw; unsigned int u_len = (unsigned int)len; #endif SSL3_BUFFER *wb = &s->rlayer.wbuf[0]; int i; if (len < 0) { SSLerr(SSL_F_SSL3_WRITE_BYTES, SSL_R_SSL_NEGATIVE_LENGTH); return -1; } s->rwstate = SSL_NOTHING; tot = s->rlayer.wnum; /* * ensure that if we end up with a smaller value of data to write out * than the the original len from a write which didn't complete for * non-blocking I/O and also somehow ended up avoiding the check for * this in ssl3_write_pending/SSL_R_BAD_WRITE_RETRY as it must never be * possible to end up with (len-tot) as a large number that will then * promptly send beyond the end of the users buffer ... so we trap and * report the error in a way the user will notice */ if ((unsigned int)len < s->rlayer.wnum) { SSLerr(SSL_F_SSL3_WRITE_BYTES, SSL_R_BAD_LENGTH); return -1; } s->rlayer.wnum = 0; if (SSL_in_init(s) && !ossl_statem_get_in_handshake(s)) { i = s->handshake_func(s); if (i < 0) return (i); if (i == 0) { SSLerr(SSL_F_SSL3_WRITE_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE); return -1; } } /* * first check if there is a SSL3_BUFFER still being written out. This * will happen with non blocking IO */ if (wb->left != 0) { i = ssl3_write_pending(s, type, &buf[tot], s->rlayer.wpend_tot); if (i <= 0) { /* XXX should we ssl3_release_write_buffer if i<0? */ s->rlayer.wnum = tot; return i; } tot += i; /* this might be last fragment */ } #if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK /* * Depending on platform multi-block can deliver several *times* * better performance. Downside is that it has to allocate * jumbo buffer to accommodate up to 8 records, but the * compromise is considered worthy. */ if (type == SSL3_RT_APPLICATION_DATA && u_len >= 4 * (max_send_fragment = s->max_send_fragment) && s->compress == NULL && s->msg_callback == NULL && !SSL_USE_ETM(s) && SSL_USE_EXPLICIT_IV(s) && EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(s->enc_write_ctx)) & EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK) { unsigned char aad[13]; EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM mb_param; int packlen; /* minimize address aliasing conflicts */ if ((max_send_fragment & 0xfff) == 0) max_send_fragment -= 512; if (tot == 0 || wb->buf == NULL) { /* allocate jumbo buffer */ ssl3_release_write_buffer(s); packlen = EVP_CIPHER_CTX_ctrl(s->enc_write_ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE, max_send_fragment, NULL); if (u_len >= 8 * max_send_fragment) packlen *= 8; else packlen *= 4; if (!ssl3_setup_write_buffer(s, 1, packlen)) { SSLerr(SSL_F_SSL3_WRITE_BYTES, ERR_R_MALLOC_FAILURE); return -1; } } else if (tot == len) { /* done? 
*/ /* free jumbo buffer */ ssl3_release_write_buffer(s); return tot; } n = (len - tot); for (;;) { if (n < 4 * max_send_fragment) { /* free jumbo buffer */ ssl3_release_write_buffer(s); break; } if (s->s3->alert_dispatch) { i = s->method->ssl_dispatch_alert(s); if (i <= 0) { s->rlayer.wnum = tot; return i; } } if (n >= 8 * max_send_fragment) nw = max_send_fragment * (mb_param.interleave = 8); else nw = max_send_fragment * (mb_param.interleave = 4); memcpy(aad, s->rlayer.write_sequence, 8); aad[8] = type; aad[9] = (unsigned char)(s->version >> 8); aad[10] = (unsigned char)(s->version); aad[11] = 0; aad[12] = 0; mb_param.out = NULL; mb_param.inp = aad; mb_param.len = nw; packlen = EVP_CIPHER_CTX_ctrl(s->enc_write_ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_AAD, sizeof(mb_param), &mb_param); if (packlen <= 0 || packlen > (int)wb->len) { /* never happens */ /* free jumbo buffer */ ssl3_release_write_buffer(s); break; } mb_param.out = wb->buf; mb_param.inp = &buf[tot]; mb_param.len = nw; if (EVP_CIPHER_CTX_ctrl(s->enc_write_ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT, sizeof(mb_param), &mb_param) <= 0) return -1; s->rlayer.write_sequence[7] += mb_param.interleave; if (s->rlayer.write_sequence[7] < mb_param.interleave) { int j = 6; while (j >= 0 && (++s->rlayer.write_sequence[j--]) == 0) ; } wb->offset = 0; wb->left = packlen; s->rlayer.wpend_tot = nw; s->rlayer.wpend_buf = &buf[tot]; s->rlayer.wpend_type = type; s->rlayer.wpend_ret = nw; i = ssl3_write_pending(s, type, &buf[tot], nw); if (i <= 0) { if (i < 0 && (!s->wbio || !BIO_should_retry(s->wbio))) { /* free jumbo buffer */ ssl3_release_write_buffer(s); } s->rlayer.wnum = tot; return i; } if (i == (int)n) { /* free jumbo buffer */ ssl3_release_write_buffer(s); return tot + i; } n -= i; tot += i; } } else #endif if (tot == len) { /* done? */ if (s->mode & SSL_MODE_RELEASE_BUFFERS && !SSL_IS_DTLS(s)) ssl3_release_write_buffer(s); return tot; } n = (len - tot); split_send_fragment = s->split_send_fragment; /* * If max_pipelines is 0 then this means "undefined" and we default to * 1 pipeline. 
Similarly if the cipher does not support pipelined * processing then we also only use 1 pipeline, or if we're not using * explicit IVs */ maxpipes = s->max_pipelines; if (maxpipes > SSL_MAX_PIPELINES) { /* * We should have prevented this when we set max_pipelines so we * shouldn't get here */ SSLerr(SSL_F_SSL3_WRITE_BYTES, ERR_R_INTERNAL_ERROR); return -1; } if (maxpipes == 0 || s->enc_write_ctx == NULL || !(EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(s->enc_write_ctx)) & EVP_CIPH_FLAG_PIPELINE) || !SSL_USE_EXPLICIT_IV(s)) maxpipes = 1; if (s->max_send_fragment == 0 || split_send_fragment > s->max_send_fragment || split_send_fragment == 0) { /* * We should have prevented this when we set the split and max send * fragments so we shouldn't get here */ SSLerr(SSL_F_SSL3_WRITE_BYTES, ERR_R_INTERNAL_ERROR); return -1; } for (;;) { unsigned int pipelens[SSL_MAX_PIPELINES], tmppipelen, remain; unsigned int numpipes, j; if (n == 0) numpipes = 1; else numpipes = ((n - 1) / split_send_fragment) + 1; if (numpipes > maxpipes) numpipes = maxpipes; if (n / numpipes >= s->max_send_fragment) { /* * We have enough data to completely fill all available * pipelines */ for (j = 0; j < numpipes; j++) { pipelens[j] = s->max_send_fragment; } } else { /* We can partially fill all available pipelines */ tmppipelen = n / numpipes; remain = n % numpipes; for (j = 0; j < numpipes; j++) { pipelens[j] = tmppipelen; if (j < remain) pipelens[j]++; } } i = do_ssl3_write(s, type, &(buf[tot]), pipelens, numpipes, 0); if (i <= 0) { /* XXX should we ssl3_release_write_buffer if i<0? */ s->rlayer.wnum = tot; return i; } if ((i == (int)n) || (type == SSL3_RT_APPLICATION_DATA && (s->mode & SSL_MODE_ENABLE_PARTIAL_WRITE))) { /* * next chunk of data should get another prepended empty fragment * in ciphersuites with known-IV weakness: */ s->s3->empty_fragment_done = 0; if ((i == (int)n) && s->mode & SSL_MODE_RELEASE_BUFFERS && !SSL_IS_DTLS(s)) ssl3_release_write_buffer(s); return tot + i; } n -= i; tot += i; } } Commit Message: Don't change the state of the ETM flags until CCS processing Changing the ciphersuite during a renegotiation can result in a crash leading to a DoS attack. ETM has not been implemented in 1.1.0 for DTLS so this is TLS only. The problem is caused by changing the flag indicating whether to use ETM or not immediately on negotiation of ETM, rather than at CCS. Therefore, during a renegotiation, if the ETM state is changing (usually due to a change of ciphersuite), then an error/crash will occur. Due to the fact that there are separate CCS messages for read and write we actually now need two flags to determine whether to use ETM or not. CVE-2017-3733 Reviewed-by: Richard Levitte <[email protected]> CWE ID: CWE-20
int ssl3_write_bytes(SSL *s, int type, const void *buf_, int len) { const unsigned char *buf = buf_; int tot; unsigned int n, split_send_fragment, maxpipes; #if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK unsigned int max_send_fragment, nw; unsigned int u_len = (unsigned int)len; #endif SSL3_BUFFER *wb = &s->rlayer.wbuf[0]; int i; if (len < 0) { SSLerr(SSL_F_SSL3_WRITE_BYTES, SSL_R_SSL_NEGATIVE_LENGTH); return -1; } s->rwstate = SSL_NOTHING; tot = s->rlayer.wnum; /* * ensure that if we end up with a smaller value of data to write out * than the the original len from a write which didn't complete for * non-blocking I/O and also somehow ended up avoiding the check for * this in ssl3_write_pending/SSL_R_BAD_WRITE_RETRY as it must never be * possible to end up with (len-tot) as a large number that will then * promptly send beyond the end of the users buffer ... so we trap and * report the error in a way the user will notice */ if ((unsigned int)len < s->rlayer.wnum) { SSLerr(SSL_F_SSL3_WRITE_BYTES, SSL_R_BAD_LENGTH); return -1; } s->rlayer.wnum = 0; if (SSL_in_init(s) && !ossl_statem_get_in_handshake(s)) { i = s->handshake_func(s); if (i < 0) return (i); if (i == 0) { SSLerr(SSL_F_SSL3_WRITE_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE); return -1; } } /* * first check if there is a SSL3_BUFFER still being written out. This * will happen with non blocking IO */ if (wb->left != 0) { i = ssl3_write_pending(s, type, &buf[tot], s->rlayer.wpend_tot); if (i <= 0) { /* XXX should we ssl3_release_write_buffer if i<0? */ s->rlayer.wnum = tot; return i; } tot += i; /* this might be last fragment */ } #if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK /* * Depending on platform multi-block can deliver several *times* * better performance. Downside is that it has to allocate * jumbo buffer to accommodate up to 8 records, but the * compromise is considered worthy. */ if (type == SSL3_RT_APPLICATION_DATA && u_len >= 4 * (max_send_fragment = s->max_send_fragment) && s->compress == NULL && s->msg_callback == NULL && !SSL_WRITE_ETM(s) && SSL_USE_EXPLICIT_IV(s) && EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(s->enc_write_ctx)) & EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK) { unsigned char aad[13]; EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM mb_param; int packlen; /* minimize address aliasing conflicts */ if ((max_send_fragment & 0xfff) == 0) max_send_fragment -= 512; if (tot == 0 || wb->buf == NULL) { /* allocate jumbo buffer */ ssl3_release_write_buffer(s); packlen = EVP_CIPHER_CTX_ctrl(s->enc_write_ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE, max_send_fragment, NULL); if (u_len >= 8 * max_send_fragment) packlen *= 8; else packlen *= 4; if (!ssl3_setup_write_buffer(s, 1, packlen)) { SSLerr(SSL_F_SSL3_WRITE_BYTES, ERR_R_MALLOC_FAILURE); return -1; } } else if (tot == len) { /* done? 
*/ /* free jumbo buffer */ ssl3_release_write_buffer(s); return tot; } n = (len - tot); for (;;) { if (n < 4 * max_send_fragment) { /* free jumbo buffer */ ssl3_release_write_buffer(s); break; } if (s->s3->alert_dispatch) { i = s->method->ssl_dispatch_alert(s); if (i <= 0) { s->rlayer.wnum = tot; return i; } } if (n >= 8 * max_send_fragment) nw = max_send_fragment * (mb_param.interleave = 8); else nw = max_send_fragment * (mb_param.interleave = 4); memcpy(aad, s->rlayer.write_sequence, 8); aad[8] = type; aad[9] = (unsigned char)(s->version >> 8); aad[10] = (unsigned char)(s->version); aad[11] = 0; aad[12] = 0; mb_param.out = NULL; mb_param.inp = aad; mb_param.len = nw; packlen = EVP_CIPHER_CTX_ctrl(s->enc_write_ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_AAD, sizeof(mb_param), &mb_param); if (packlen <= 0 || packlen > (int)wb->len) { /* never happens */ /* free jumbo buffer */ ssl3_release_write_buffer(s); break; } mb_param.out = wb->buf; mb_param.inp = &buf[tot]; mb_param.len = nw; if (EVP_CIPHER_CTX_ctrl(s->enc_write_ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT, sizeof(mb_param), &mb_param) <= 0) return -1; s->rlayer.write_sequence[7] += mb_param.interleave; if (s->rlayer.write_sequence[7] < mb_param.interleave) { int j = 6; while (j >= 0 && (++s->rlayer.write_sequence[j--]) == 0) ; } wb->offset = 0; wb->left = packlen; s->rlayer.wpend_tot = nw; s->rlayer.wpend_buf = &buf[tot]; s->rlayer.wpend_type = type; s->rlayer.wpend_ret = nw; i = ssl3_write_pending(s, type, &buf[tot], nw); if (i <= 0) { if (i < 0 && (!s->wbio || !BIO_should_retry(s->wbio))) { /* free jumbo buffer */ ssl3_release_write_buffer(s); } s->rlayer.wnum = tot; return i; } if (i == (int)n) { /* free jumbo buffer */ ssl3_release_write_buffer(s); return tot + i; } n -= i; tot += i; } } else #endif if (tot == len) { /* done? */ if (s->mode & SSL_MODE_RELEASE_BUFFERS && !SSL_IS_DTLS(s)) ssl3_release_write_buffer(s); return tot; } n = (len - tot); split_send_fragment = s->split_send_fragment; /* * If max_pipelines is 0 then this means "undefined" and we default to * 1 pipeline. 
Similarly if the cipher does not support pipelined * processing then we also only use 1 pipeline, or if we're not using * explicit IVs */ maxpipes = s->max_pipelines; if (maxpipes > SSL_MAX_PIPELINES) { /* * We should have prevented this when we set max_pipelines so we * shouldn't get here */ SSLerr(SSL_F_SSL3_WRITE_BYTES, ERR_R_INTERNAL_ERROR); return -1; } if (maxpipes == 0 || s->enc_write_ctx == NULL || !(EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(s->enc_write_ctx)) & EVP_CIPH_FLAG_PIPELINE) || !SSL_USE_EXPLICIT_IV(s)) maxpipes = 1; if (s->max_send_fragment == 0 || split_send_fragment > s->max_send_fragment || split_send_fragment == 0) { /* * We should have prevented this when we set the split and max send * fragments so we shouldn't get here */ SSLerr(SSL_F_SSL3_WRITE_BYTES, ERR_R_INTERNAL_ERROR); return -1; } for (;;) { unsigned int pipelens[SSL_MAX_PIPELINES], tmppipelen, remain; unsigned int numpipes, j; if (n == 0) numpipes = 1; else numpipes = ((n - 1) / split_send_fragment) + 1; if (numpipes > maxpipes) numpipes = maxpipes; if (n / numpipes >= s->max_send_fragment) { /* * We have enough data to completely fill all available * pipelines */ for (j = 0; j < numpipes; j++) { pipelens[j] = s->max_send_fragment; } } else { /* We can partially fill all available pipelines */ tmppipelen = n / numpipes; remain = n % numpipes; for (j = 0; j < numpipes; j++) { pipelens[j] = tmppipelen; if (j < remain) pipelens[j]++; } } i = do_ssl3_write(s, type, &(buf[tot]), pipelens, numpipes, 0); if (i <= 0) { /* XXX should we ssl3_release_write_buffer if i<0? */ s->rlayer.wnum = tot; return i; } if ((i == (int)n) || (type == SSL3_RT_APPLICATION_DATA && (s->mode & SSL_MODE_ENABLE_PARTIAL_WRITE))) { /* * next chunk of data should get another prepended empty fragment * in ciphersuites with known-IV weakness: */ s->s3->empty_fragment_done = 0; if ((i == (int)n) && s->mode & SSL_MODE_RELEASE_BUFFERS && !SSL_IS_DTLS(s)) ssl3_release_write_buffer(s); return tot + i; } n -= i; tot += i; } }
168,421
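The only functional change in the write path above is SSL_USE_ETM(s) becoming SSL_WRITE_ETM(s). The commit message explains why: the encrypt-then-MAC choice negotiated during a renegotiation must not take effect until the ChangeCipherSpec for each direction is processed, which requires one flag per direction. The sketch below is a generic illustration of that staging idea, not OpenSSL's actual state machine:

    #include <stdbool.h>
    #include <stdio.h>

    struct conn {
        bool etm_negotiated;   /* what the new handshake agreed on */
        bool read_etm;         /* consulted when decrypting incoming records */
        bool write_etm;        /* consulted when protecting outgoing records */
    };

    /* Each direction switches only when its own CCS is processed. */
    static void on_ccs_received(struct conn *c) { c->read_etm  = c->etm_negotiated; }
    static void on_ccs_sent(struct conn *c)     { c->write_etm = c->etm_negotiated; }

    int main(void)
    {
        struct conn c = { .etm_negotiated = true };   /* renegotiated to an EtM suite */
        printf("write_etm before own CCS: %d\n", c.write_etm);   /* still old layout */
        on_ccs_sent(&c);
        printf("write_etm after own CCS:  %d\n", c.write_etm);
        on_ccs_received(&c);
        printf("read_etm after peer CCS:  %d\n", c.read_etm);
        return 0;
    }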
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ZSTD_compressSequences_internal(seqStore_t* seqStorePtr, ZSTD_entropyCTables_t const* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, ZSTD_CCtx_params const* cctxParams, void* dst, size_t dstCapacity, void* workspace, size_t wkspSize, const int bmi2) { const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; ZSTD_strategy const strategy = cctxParams->cParams.strategy; U32 count[MaxSeq+1]; FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable; FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ const seqDef* const sequences = seqStorePtr->sequencesStart; const BYTE* const ofCodeTable = seqStorePtr->ofCode; const BYTE* const llCodeTable = seqStorePtr->llCode; const BYTE* const mlCodeTable = seqStorePtr->mlCode; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; BYTE* seqHead; BYTE* lastNCount = NULL; ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog))); /* Compress literals */ { const BYTE* const literals = seqStorePtr->litStart; size_t const litSize = seqStorePtr->lit - literals; int const disableLiteralCompression = (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0); size_t const cSize = ZSTD_compressLiterals( &prevEntropy->huf, &nextEntropy->huf, cctxParams->cParams.strategy, disableLiteralCompression, op, dstCapacity, literals, litSize, workspace, wkspSize, bmi2); if (ZSTD_isError(cSize)) return cSize; assert(cSize <= dstCapacity); op += cSize; } /* Sequences Header */ if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall); if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq; else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; if (nbSeq==0) { /* Copy the old tables over as if we repeated them */ memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); return op - ostart; } /* seqHead : flags for FSE encoding type */ seqHead = op++; /* convert length/distances into codes */ ZSTD_seqToCodes(seqStorePtr); /* build CTable for Literal Lengths */ { U32 max = MaxLL; size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ DEBUGLOG(5, "Building LL table"); nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode; LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode, count, max, mostFrequent, nbSeq, LLFSELog, prevEntropy->fse.litlengthCTable, LL_defaultNorm, LL_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(set_basic < set_compressed && set_rle < set_compressed); assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable), workspace, wkspSize); if (ZSTD_isError(countSize)) return countSize; if (LLtype == set_compressed) lastNCount = op; op += countSize; } } /* build CTable for Offsets */ { U32 max = MaxOff; size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, 
nbSeq, workspace, wkspSize); /* can't fail */ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed; DEBUGLOG(5, "Building OF table"); nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode; Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode, count, max, mostFrequent, nbSeq, OffFSELog, prevEntropy->fse.offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy); assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable), workspace, wkspSize); if (ZSTD_isError(countSize)) return countSize; if (Offtype == set_compressed) lastNCount = op; op += countSize; } } /* build CTable for MatchLengths */ { U32 max = MaxML; size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ DEBUGLOG(5, "Building ML table"); nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode; MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode, count, max, mostFrequent, nbSeq, MLFSELog, prevEntropy->fse.matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable), workspace, wkspSize); if (ZSTD_isError(countSize)) return countSize; if (MLtype == set_compressed) lastNCount = op; op += countSize; } } *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); { size_t const bitstreamSize = ZSTD_encodeSequences( op, oend - op, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets, bmi2); if (ZSTD_isError(bitstreamSize)) return bitstreamSize; op += bitstreamSize; /* zstd versions <= 1.3.4 mistakenly report corruption when * FSE_readNCount() recieves a buffer < 4 bytes. * Fixed by https://github.com/facebook/zstd/pull/1146. * This can happen when the last set_compressed table present is 2 * bytes and the bitstream is only one byte. * In this exceedingly rare case, we will simply emit an uncompressed * block, since it isn't worth optimizing. */ if (lastNCount && (op - lastNCount) < 4) { /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ assert(op - lastNCount == 3); DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " "emitting an uncompressed block."); return 0; } } return op - ostart; } Commit Message: fixed T36302429 CWE ID: CWE-362
ZSTD_compressSequences_internal(seqStore_t* seqStorePtr, ZSTD_entropyCTables_t const* prevEntropy, ZSTD_entropyCTables_t* nextEntropy, ZSTD_CCtx_params const* cctxParams, void* dst, size_t dstCapacity, void* workspace, size_t wkspSize, const int bmi2) { const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; ZSTD_strategy const strategy = cctxParams->cParams.strategy; U32 count[MaxSeq+1]; FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable; FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ const seqDef* const sequences = seqStorePtr->sequencesStart; const BYTE* const ofCodeTable = seqStorePtr->ofCode; const BYTE* const llCodeTable = seqStorePtr->llCode; const BYTE* const mlCodeTable = seqStorePtr->mlCode; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; BYTE* seqHead; BYTE* lastNCount = NULL; ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog))); /* Compress literals */ { const BYTE* const literals = seqStorePtr->litStart; size_t const litSize = seqStorePtr->lit - literals; int const disableLiteralCompression = (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0); size_t const cSize = ZSTD_compressLiterals( &prevEntropy->huf, &nextEntropy->huf, cctxParams->cParams.strategy, disableLiteralCompression, op, dstCapacity, literals, litSize, workspace, wkspSize, bmi2); if (ZSTD_isError(cSize)) return cSize; assert(cSize <= dstCapacity); op += cSize; } /* Sequences Header */ if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall); if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq; else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; if (nbSeq==0) { /* Copy the old tables over as if we repeated them */ memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); return op - ostart; } /* seqHead : flags for FSE encoding type */ seqHead = op++; /* convert length/distances into codes */ ZSTD_seqToCodes(seqStorePtr); /* build CTable for Literal Lengths */ { U32 max = MaxLL; size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ DEBUGLOG(5, "Building LL table"); nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode; LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode, count, max, mostFrequent, nbSeq, LLFSELog, prevEntropy->fse.litlengthCTable, LL_defaultNorm, LL_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(set_basic < set_compressed && set_rle < set_compressed); assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, prevEntropy->fse.litlengthCTable, sizeof(prevEntropy->fse.litlengthCTable), workspace, wkspSize); if (ZSTD_isError(countSize)) return countSize; if (LLtype == set_compressed) lastNCount = op; op += countSize; } } /* build CTable for Offsets */ { U32 max = MaxOff; size_t const mostFrequent = HIST_countFast_wksp(count, &max, ofCodeTable, nbSeq, 
workspace, wkspSize); /* can't fail */ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed; DEBUGLOG(5, "Building OF table"); nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode; Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode, count, max, mostFrequent, nbSeq, OffFSELog, prevEntropy->fse.offcodeCTable, OF_defaultNorm, OF_defaultNormLog, defaultPolicy, strategy); assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, prevEntropy->fse.offcodeCTable, sizeof(prevEntropy->fse.offcodeCTable), workspace, wkspSize); if (ZSTD_isError(countSize)) return countSize; if (Offtype == set_compressed) lastNCount = op; op += countSize; } } /* build CTable for MatchLengths */ { U32 max = MaxML; size_t const mostFrequent = HIST_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op)); nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode; MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode, count, max, mostFrequent, nbSeq, MLFSELog, prevEntropy->fse.matchlengthCTable, ML_defaultNorm, ML_defaultNormLog, ZSTD_defaultAllowed, strategy); assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, prevEntropy->fse.matchlengthCTable, sizeof(prevEntropy->fse.matchlengthCTable), workspace, wkspSize); if (ZSTD_isError(countSize)) return countSize; if (MLtype == set_compressed) lastNCount = op; op += countSize; } } *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); { size_t const bitstreamSize = ZSTD_encodeSequences( op, oend - op, CTable_MatchLength, mlCodeTable, CTable_OffsetBits, ofCodeTable, CTable_LitLength, llCodeTable, sequences, nbSeq, longOffsets, bmi2); if (ZSTD_isError(bitstreamSize)) return bitstreamSize; op += bitstreamSize; /* zstd versions <= 1.3.4 mistakenly report corruption when * FSE_readNCount() recieves a buffer < 4 bytes. * Fixed by https://github.com/facebook/zstd/pull/1146. * This can happen when the last set_compressed table present is 2 * bytes and the bitstream is only one byte. * In this exceedingly rare case, we will simply emit an uncompressed * block, since it isn't worth optimizing. */ if (lastNCount && (op - lastNCount) < 4) { /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ assert(op - lastNCount == 3); DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " "emitting an uncompressed block."); return 0; } } return op - ostart; }
169,672
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool MessageLoop::NestableTasksAllowed() const { return nestable_tasks_allowed_; } Commit Message: Introduce RunLoop::Type::NESTABLE_TASKS_ALLOWED to replace MessageLoop::ScopedNestableTaskAllower. (as well as MessageLoop::SetNestableTasksAllowed()) Surveying usage: the scoped object is always instantiated right before RunLoop().Run(). The intent is really to allow nestable tasks in that RunLoop so it's better to explicitly label that RunLoop as such and it allows us to break the last dependency that forced some RunLoop users to use MessageLoop APIs. There's also the odd case of allowing nestable tasks for loops that are reentrant from a native task (without going through RunLoop), these are the minority but will have to be handled (after cleaning up the majority of cases that are RunLoop induced). As highlighted by robliao@ in https://chromium-review.googlesource.com/c/600517 (which was merged in this CL). [email protected] Bug: 750779 Change-Id: I43d122c93ec903cff3a6fe7b77ec461ea0656448 Reviewed-on: https://chromium-review.googlesource.com/594713 Commit-Queue: Gabriel Charette <[email protected]> Reviewed-by: Robert Liao <[email protected]> Reviewed-by: danakj <[email protected]> Cr-Commit-Position: refs/heads/master@{#492263} CWE ID:
bool MessageLoop::NestableTasksAllowed() const { return nestable_tasks_allowed_ || run_loop_client_->ProcessingTasksAllowed(); }
171,865
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: GesturePoint::GesturePoint() : first_touch_time_(0.0), last_touch_time_(0.0), last_tap_time_(0.0), velocity_calculator_(kBufferedPoints) { } Commit Message: Add setters for the aura gesture recognizer constants. BUG=113227 TEST=none Review URL: http://codereview.chromium.org/9372040 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@122586 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-20
GesturePoint::GesturePoint() : first_touch_time_(0.0), last_touch_time_(0.0), last_tap_time_(0.0), velocity_calculator_(GestureConfiguration::buffered_points()) { }
171,041
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: status_t SampleTable::setCompositionTimeToSampleParams( off64_t data_offset, size_t data_size) { ALOGI("There are reordered frames present."); if (mCompositionTimeDeltaEntries != NULL || data_size < 8) { return ERROR_MALFORMED; } uint8_t header[8]; if (mDataSource->readAt( data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) { return ERROR_IO; } if (U32_AT(header) != 0) { return ERROR_MALFORMED; } size_t numEntries = U32_AT(&header[4]); if (data_size != (numEntries + 1) * 8) { return ERROR_MALFORMED; } mNumCompositionTimeDeltaEntries = numEntries; mCompositionTimeDeltaEntries = new uint32_t[2 * numEntries]; if (mDataSource->readAt( data_offset + 8, mCompositionTimeDeltaEntries, numEntries * 8) < (ssize_t)numEntries * 8) { delete[] mCompositionTimeDeltaEntries; mCompositionTimeDeltaEntries = NULL; return ERROR_IO; } for (size_t i = 0; i < 2 * numEntries; ++i) { mCompositionTimeDeltaEntries[i] = ntohl(mCompositionTimeDeltaEntries[i]); } mCompositionDeltaLookup->setEntries( mCompositionTimeDeltaEntries, mNumCompositionTimeDeltaEntries); return OK; } Commit Message: SampleTable: check integer overflow during table alloc Bug: 15328708 Bug: 15342615 Bug: 15342751 Change-Id: I6bb110a1eba46506799c73be8ff9a4f71c7e7053 CWE ID: CWE-189
status_t SampleTable::setCompositionTimeToSampleParams( off64_t data_offset, size_t data_size) { ALOGI("There are reordered frames present."); if (mCompositionTimeDeltaEntries != NULL || data_size < 8) { return ERROR_MALFORMED; } uint8_t header[8]; if (mDataSource->readAt( data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) { return ERROR_IO; } if (U32_AT(header) != 0) { return ERROR_MALFORMED; } size_t numEntries = U32_AT(&header[4]); if (data_size != (numEntries + 1) * 8) { return ERROR_MALFORMED; } mNumCompositionTimeDeltaEntries = numEntries; uint64_t allocSize = numEntries * 2 * sizeof(uint32_t); if (allocSize > SIZE_MAX) { return ERROR_OUT_OF_RANGE; } mCompositionTimeDeltaEntries = new uint32_t[2 * numEntries]; if (mDataSource->readAt( data_offset + 8, mCompositionTimeDeltaEntries, numEntries * 8) < (ssize_t)numEntries * 8) { delete[] mCompositionTimeDeltaEntries; mCompositionTimeDeltaEntries = NULL; return ERROR_IO; } for (size_t i = 0; i < 2 * numEntries; ++i) { mCompositionTimeDeltaEntries[i] = ntohl(mCompositionTimeDeltaEntries[i]); } mCompositionDeltaLookup->setEntries( mCompositionTimeDeltaEntries, mNumCompositionTimeDeltaEntries); return OK; }
173,375
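The pattern in this fix — computing the allocation size for an untrusted entry count in a wider type and rejecting it before calling new[] — generalizes to any table whose size comes from file input. Below is a minimal, self-contained C++ sketch of that guard; the function name, the 64 MiB budget, and the two-words-per-entry layout are illustrative assumptions, not taken from the Android sources.

    #include <cstddef>
    #include <cstdint>
    #include <limits>
    #include <stdexcept>
    #include <vector>

    // Allocates a table of numEntries pairs of uint32_t, refusing counts whose
    // byte size would overflow size_t or exceed a sanity budget.
    std::vector<uint32_t> allocEntryTable(uint32_t numEntries) {
        const uint64_t kWordsPerEntry = 2;              // two uint32_t per entry
        uint64_t words = kWordsPerEntry * numEntries;   // done in 64 bits, cannot wrap
        uint64_t bytes = words * sizeof(uint32_t);
        if (bytes > std::numeric_limits<std::size_t>::max() || bytes > (64u << 20))
            throw std::length_error("entry count too large");
        return std::vector<uint32_t>(static_cast<std::size_t>(words), 0);
    }

The explicit size_t comparison mirrors the fix above: on a 32-bit build it is the check that actually prevents a wrapped allocation, while the budget cap keeps hostile counts from exhausting memory on 64-bit builds.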
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int xpm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { XPMDecContext *x = avctx->priv_data; AVFrame *p=data; const uint8_t *end, *ptr = avpkt->data; int ncolors, cpp, ret, i, j; int64_t size; uint32_t *dst; avctx->pix_fmt = AV_PIX_FMT_BGRA; end = avpkt->data + avpkt->size; while (memcmp(ptr, "/* XPM */", 9) && ptr < end - 9) ptr++; if (ptr >= end) { av_log(avctx, AV_LOG_ERROR, "missing signature\n"); return AVERROR_INVALIDDATA; } ptr += mod_strcspn(ptr, "\""); if (sscanf(ptr, "\"%u %u %u %u\",", &avctx->width, &avctx->height, &ncolors, &cpp) != 4) { av_log(avctx, AV_LOG_ERROR, "missing image parameters\n"); return AVERROR_INVALIDDATA; } if ((ret = ff_set_dimensions(avctx, avctx->width, avctx->height)) < 0) return ret; if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; if (cpp <= 0 || cpp >= 5) { av_log(avctx, AV_LOG_ERROR, "unsupported/invalid number of chars per pixel: %d\n", cpp); return AVERROR_INVALIDDATA; } size = 1; for (i = 0; i < cpp; i++) size *= 94; if (ncolors <= 0 || ncolors > size) { av_log(avctx, AV_LOG_ERROR, "invalid number of colors: %d\n", ncolors); return AVERROR_INVALIDDATA; } size *= 4; av_fast_padded_malloc(&x->pixels, &x->pixels_size, size); if (!x->pixels) return AVERROR(ENOMEM); ptr += mod_strcspn(ptr, ",") + 1; for (i = 0; i < ncolors; i++) { const uint8_t *index; int len; ptr += mod_strcspn(ptr, "\"") + 1; if (ptr + cpp > end) return AVERROR_INVALIDDATA; index = ptr; ptr += cpp; ptr = strstr(ptr, "c "); if (ptr) { ptr += 2; } else { return AVERROR_INVALIDDATA; } len = strcspn(ptr, "\" "); if ((ret = ascii2index(index, cpp)) < 0) return ret; x->pixels[ret] = color_string_to_rgba(ptr, len); ptr += mod_strcspn(ptr, ",") + 1; } for (i = 0; i < avctx->height; i++) { dst = (uint32_t *)(p->data[0] + i * p->linesize[0]); ptr += mod_strcspn(ptr, "\"") + 1; for (j = 0; j < avctx->width; j++) { if (ptr + cpp > end) return AVERROR_INVALIDDATA; if ((ret = ascii2index(ptr, cpp)) < 0) return ret; *dst++ = x->pixels[ret]; ptr += cpp; } ptr += mod_strcspn(ptr, ",") + 1; } p->key_frame = 1; p->pict_type = AV_PICTURE_TYPE_I; *got_frame = 1; return avpkt->size; } Commit Message: avcodec/xpmdec: Fix multiple pointer/memory issues Most of these were found through code review in response to fixing 1466/clusterfuzz-testcase-minimized-5961584419536896 There is thus no testcase for most of this. The initial issue was Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg Signed-off-by: Michael Niedermayer <[email protected]> CWE ID: CWE-119
static int xpm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { XPMDecContext *x = avctx->priv_data; AVFrame *p=data; const uint8_t *end, *ptr; int ncolors, cpp, ret, i, j; int64_t size; uint32_t *dst; avctx->pix_fmt = AV_PIX_FMT_BGRA; av_fast_padded_malloc(&x->buf, &x->buf_size, avpkt->size); if (!x->buf) return AVERROR(ENOMEM); memcpy(x->buf, avpkt->data, avpkt->size); x->buf[avpkt->size] = 0; ptr = x->buf; end = x->buf + avpkt->size; while (end - ptr > 9 && memcmp(ptr, "/* XPM */", 9)) ptr++; if (end - ptr <= 9) { av_log(avctx, AV_LOG_ERROR, "missing signature\n"); return AVERROR_INVALIDDATA; } ptr += mod_strcspn(ptr, "\""); if (sscanf(ptr, "\"%u %u %u %u\",", &avctx->width, &avctx->height, &ncolors, &cpp) != 4) { av_log(avctx, AV_LOG_ERROR, "missing image parameters\n"); return AVERROR_INVALIDDATA; } if ((ret = ff_set_dimensions(avctx, avctx->width, avctx->height)) < 0) return ret; if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; if (cpp <= 0 || cpp >= 5) { av_log(avctx, AV_LOG_ERROR, "unsupported/invalid number of chars per pixel: %d\n", cpp); return AVERROR_INVALIDDATA; } size = 1; for (i = 0; i < cpp; i++) size *= 95; if (ncolors <= 0 || ncolors > size) { av_log(avctx, AV_LOG_ERROR, "invalid number of colors: %d\n", ncolors); return AVERROR_INVALIDDATA; } size *= 4; av_fast_padded_malloc(&x->pixels, &x->pixels_size, size); if (!x->pixels) return AVERROR(ENOMEM); ptr += mod_strcspn(ptr, ",") + 1; if (end - ptr < 1) return AVERROR_INVALIDDATA; for (i = 0; i < ncolors; i++) { const uint8_t *index; int len; ptr += mod_strcspn(ptr, "\"") + 1; if (end - ptr < cpp) return AVERROR_INVALIDDATA; index = ptr; ptr += cpp; ptr = strstr(ptr, "c "); if (ptr) { ptr += 2; } else { return AVERROR_INVALIDDATA; } len = strcspn(ptr, "\" "); if ((ret = ascii2index(index, cpp)) < 0) return ret; x->pixels[ret] = color_string_to_rgba(ptr, len); ptr += mod_strcspn(ptr, ",") + 1; if (end - ptr < 1) return AVERROR_INVALIDDATA; } for (i = 0; i < avctx->height; i++) { dst = (uint32_t *)(p->data[0] + i * p->linesize[0]); if (end - ptr < 1) return AVERROR_INVALIDDATA; ptr += mod_strcspn(ptr, "\"") + 1; if (end - ptr < 1) return AVERROR_INVALIDDATA; for (j = 0; j < avctx->width; j++) { if (end - ptr < cpp) return AVERROR_INVALIDDATA; if ((ret = ascii2index(ptr, cpp)) < 0) return ret; *dst++ = x->pixels[ret]; ptr += cpp; } ptr += mod_strcspn(ptr, ",") + 1; } p->key_frame = 1; p->pict_type = AV_PICTURE_TYPE_I; *got_frame = 1; return avpkt->size; }
168,078
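A recurring detail in the xpm fix is rewriting bounds checks from the form ptr + n > end into end - ptr < n: the former can overflow the pointer when the cursor is already at or past the end of the buffer, while the latter only subtracts two pointers into the same allocation. A small C++ sketch of the safer idiom follows; the parser function is hypothetical and not part of the FFmpeg code.

    #include <cstddef>
    #include <cstdint>

    // Copies want bytes from the cursor into out, refusing to read past end.
    // end - p is well defined for two pointers into the same buffer, whereas
    // p + want can wrap, which is why the fixed decoder checks remaining length.
    bool readBytes(const std::uint8_t*& p, const std::uint8_t* end,
                   std::uint8_t* out, std::size_t want) {
        if (p > end || static_cast<std::size_t>(end - p) < want)
            return false;
        for (std::size_t i = 0; i < want; ++i)
            out[i] = p[i];
        p += want;
        return true;
    }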
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int iscsi_add_notunderstood_response( char *key, char *value, struct iscsi_param_list *param_list) { struct iscsi_extra_response *extra_response; if (strlen(value) > VALUE_MAXLEN) { pr_err("Value for notunderstood key \"%s\" exceeds %d," " protocol error.\n", key, VALUE_MAXLEN); return -1; } extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL); if (!extra_response) { pr_err("Unable to allocate memory for" " struct iscsi_extra_response.\n"); return -1; } INIT_LIST_HEAD(&extra_response->er_list); strncpy(extra_response->key, key, strlen(key) + 1); strncpy(extra_response->value, NOTUNDERSTOOD, strlen(NOTUNDERSTOOD) + 1); list_add_tail(&extra_response->er_list, &param_list->extra_response_list); return 0; } Commit Message: iscsi-target: fix heap buffer overflow on error If a key was larger than 64 bytes, as checked by iscsi_check_key(), the error response packet, generated by iscsi_add_notunderstood_response(), would still attempt to copy the entire key into the packet, overflowing the structure on the heap. Remote preauthentication kernel memory corruption was possible if a target was configured and listening on the network. CVE-2013-2850 Signed-off-by: Kees Cook <[email protected]> Cc: [email protected] Signed-off-by: Nicholas Bellinger <[email protected]> CWE ID: CWE-119
static int iscsi_add_notunderstood_response( char *key, char *value, struct iscsi_param_list *param_list) { struct iscsi_extra_response *extra_response; if (strlen(value) > VALUE_MAXLEN) { pr_err("Value for notunderstood key \"%s\" exceeds %d," " protocol error.\n", key, VALUE_MAXLEN); return -1; } extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL); if (!extra_response) { pr_err("Unable to allocate memory for" " struct iscsi_extra_response.\n"); return -1; } INIT_LIST_HEAD(&extra_response->er_list); strlcpy(extra_response->key, key, sizeof(extra_response->key)); strlcpy(extra_response->value, NOTUNDERSTOOD, sizeof(extra_response->value)); list_add_tail(&extra_response->er_list, &param_list->extra_response_list); return 0; }
166,050
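The bug in this row is the classic misuse of strncpy with strlen(src) + 1 as the bound: the limit tracks the source instead of the destination, so an oversized key overruns the fixed-size field on the heap. The fix bounds the copy by sizeof of the destination and guarantees NUL termination. Since strlcpy is a BSD/kernel extension rather than standard C++, here is a portable sketch of an equivalent destination-bounded copy; the function name is invented for illustration.

    #include <cstddef>
    #include <cstring>

    // Copies src into dst, never writing more than dstSize bytes and always
    // NUL-terminating when dstSize > 0. Returns the length it tried to copy so
    // callers can detect truncation, mirroring strlcpy's contract.
    std::size_t boundedCopy(char* dst, const char* src, std::size_t dstSize) {
        std::size_t srcLen = std::strlen(src);
        if (dstSize != 0) {
            std::size_t n = (srcLen < dstSize - 1) ? srcLen : dstSize - 1;
            std::memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return srcLen;
    }

Called with the destination field's sizeof, an over-long source is truncated instead of corrupting adjacent heap data, which is exactly the behavior the patched iscsi code relies on.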
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: status_t Camera2Client::dump(int fd, const Vector<String16>& args) { String8 result; result.appendFormat("Client2[%d] (%p) Client: %s PID: %d, dump:\n", mCameraId, getRemoteCallback()->asBinder().get(), String8(mClientPackageName).string(), mClientPid); result.append(" State: "); #define CASE_APPEND_ENUM(x) case x: result.append(#x "\n"); break; const Parameters& p = mParameters.unsafeAccess(); result.append(Parameters::getStateName(p.state)); result.append("\n Current parameters:\n"); result.appendFormat(" Preview size: %d x %d\n", p.previewWidth, p.previewHeight); result.appendFormat(" Preview FPS range: %d - %d\n", p.previewFpsRange[0], p.previewFpsRange[1]); result.appendFormat(" Preview HAL pixel format: 0x%x\n", p.previewFormat); result.appendFormat(" Preview transform: %x\n", p.previewTransform); result.appendFormat(" Picture size: %d x %d\n", p.pictureWidth, p.pictureHeight); result.appendFormat(" Jpeg thumbnail size: %d x %d\n", p.jpegThumbSize[0], p.jpegThumbSize[1]); result.appendFormat(" Jpeg quality: %d, thumbnail quality: %d\n", p.jpegQuality, p.jpegThumbQuality); result.appendFormat(" Jpeg rotation: %d\n", p.jpegRotation); result.appendFormat(" GPS tags %s\n", p.gpsEnabled ? "enabled" : "disabled"); if (p.gpsEnabled) { result.appendFormat(" GPS lat x long x alt: %f x %f x %f\n", p.gpsCoordinates[0], p.gpsCoordinates[1], p.gpsCoordinates[2]); result.appendFormat(" GPS timestamp: %lld\n", p.gpsTimestamp); result.appendFormat(" GPS processing method: %s\n", p.gpsProcessingMethod.string()); } result.append(" White balance mode: "); switch (p.wbMode) { CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_AUTO) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_INCANDESCENT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_FLUORESCENT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_DAYLIGHT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_TWILIGHT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_SHADE) default: result.append("UNKNOWN\n"); } result.append(" Effect mode: "); switch (p.effectMode) { CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_OFF) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_MONO) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_NEGATIVE) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_SOLARIZE) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_SEPIA) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_POSTERIZE) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_AQUA) default: result.append("UNKNOWN\n"); } result.append(" Antibanding mode: "); switch (p.antibandingMode) { CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO) CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF) CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ) CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ) default: result.append("UNKNOWN\n"); } result.append(" Scene mode: "); switch (p.sceneMode) { case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED: result.append("AUTO\n"); break; CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_ACTION) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PORTRAIT) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_LANDSCAPE) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_NIGHT) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_THEATRE) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_BEACH) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_SNOW) 
CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_SUNSET) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_FIREWORKS) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_SPORTS) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PARTY) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_BARCODE) default: result.append("UNKNOWN\n"); } result.append(" Flash mode: "); switch (p.flashMode) { CASE_APPEND_ENUM(Parameters::FLASH_MODE_OFF) CASE_APPEND_ENUM(Parameters::FLASH_MODE_AUTO) CASE_APPEND_ENUM(Parameters::FLASH_MODE_ON) CASE_APPEND_ENUM(Parameters::FLASH_MODE_TORCH) CASE_APPEND_ENUM(Parameters::FLASH_MODE_RED_EYE) CASE_APPEND_ENUM(Parameters::FLASH_MODE_INVALID) default: result.append("UNKNOWN\n"); } result.append(" Focus mode: "); switch (p.focusMode) { CASE_APPEND_ENUM(Parameters::FOCUS_MODE_AUTO) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_MACRO) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_CONTINUOUS_VIDEO) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_CONTINUOUS_PICTURE) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_EDOF) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_INFINITY) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_FIXED) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_INVALID) default: result.append("UNKNOWN\n"); } result.append(" Focus state: "); switch (p.focusState) { CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_INACTIVE) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_PASSIVE_UNFOCUSED) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) default: result.append("UNKNOWN\n"); } result.append(" Focusing areas:\n"); for (size_t i = 0; i < p.focusingAreas.size(); i++) { result.appendFormat(" [ (%d, %d, %d, %d), weight %d ]\n", p.focusingAreas[i].left, p.focusingAreas[i].top, p.focusingAreas[i].right, p.focusingAreas[i].bottom, p.focusingAreas[i].weight); } result.appendFormat(" Exposure compensation index: %d\n", p.exposureCompensation); result.appendFormat(" AE lock %s, AWB lock %s\n", p.autoExposureLock ? "enabled" : "disabled", p.autoWhiteBalanceLock ? "enabled" : "disabled" ); result.appendFormat(" Metering areas:\n"); for (size_t i = 0; i < p.meteringAreas.size(); i++) { result.appendFormat(" [ (%d, %d, %d, %d), weight %d ]\n", p.meteringAreas[i].left, p.meteringAreas[i].top, p.meteringAreas[i].right, p.meteringAreas[i].bottom, p.meteringAreas[i].weight); } result.appendFormat(" Zoom index: %d\n", p.zoom); result.appendFormat(" Video size: %d x %d\n", p.videoWidth, p.videoHeight); result.appendFormat(" Recording hint is %s\n", p.recordingHint ? "set" : "not set"); result.appendFormat(" Video stabilization is %s\n", p.videoStabilization ? 
"enabled" : "disabled"); result.appendFormat(" Selected still capture FPS range: %d - %d\n", p.fastInfo.bestStillCaptureFpsRange[0], p.fastInfo.bestStillCaptureFpsRange[1]); result.append(" Current streams:\n"); result.appendFormat(" Preview stream ID: %d\n", getPreviewStreamId()); result.appendFormat(" Capture stream ID: %d\n", getCaptureStreamId()); result.appendFormat(" Recording stream ID: %d\n", getRecordingStreamId()); result.append(" Quirks for this camera:\n"); bool haveQuirk = false; if (p.quirks.triggerAfWithAuto) { result.appendFormat(" triggerAfWithAuto\n"); haveQuirk = true; } if (p.quirks.useZslFormat) { result.appendFormat(" useZslFormat\n"); haveQuirk = true; } if (p.quirks.meteringCropRegion) { result.appendFormat(" meteringCropRegion\n"); haveQuirk = true; } if (p.quirks.partialResults) { result.appendFormat(" usePartialResult\n"); haveQuirk = true; } if (!haveQuirk) { result.appendFormat(" none\n"); } write(fd, result.string(), result.size()); mStreamingProcessor->dump(fd, args); mCaptureSequencer->dump(fd, args); mFrameProcessor->dump(fd, args); mZslProcessor->dump(fd, args); return dumpDevice(fd, args); #undef CASE_APPEND_ENUM } Commit Message: Camera: Disallow dumping clients directly Camera service dumps should only be initiated through ICameraService::dump. Bug: 26265403 Change-Id: If3ca4718ed74bf33ad8a416192689203029e2803 CWE ID: CWE-264
status_t Camera2Client::dump(int fd, const Vector<String16>& args) { return BasicClient::dump(fd, args); } status_t Camera2Client::dumpClient(int fd, const Vector<String16>& args) { String8 result; result.appendFormat("Client2[%d] (%p) Client: %s PID: %d, dump:\n", mCameraId, getRemoteCallback()->asBinder().get(), String8(mClientPackageName).string(), mClientPid); result.append(" State: "); #define CASE_APPEND_ENUM(x) case x: result.append(#x "\n"); break; const Parameters& p = mParameters.unsafeAccess(); result.append(Parameters::getStateName(p.state)); result.append("\n Current parameters:\n"); result.appendFormat(" Preview size: %d x %d\n", p.previewWidth, p.previewHeight); result.appendFormat(" Preview FPS range: %d - %d\n", p.previewFpsRange[0], p.previewFpsRange[1]); result.appendFormat(" Preview HAL pixel format: 0x%x\n", p.previewFormat); result.appendFormat(" Preview transform: %x\n", p.previewTransform); result.appendFormat(" Picture size: %d x %d\n", p.pictureWidth, p.pictureHeight); result.appendFormat(" Jpeg thumbnail size: %d x %d\n", p.jpegThumbSize[0], p.jpegThumbSize[1]); result.appendFormat(" Jpeg quality: %d, thumbnail quality: %d\n", p.jpegQuality, p.jpegThumbQuality); result.appendFormat(" Jpeg rotation: %d\n", p.jpegRotation); result.appendFormat(" GPS tags %s\n", p.gpsEnabled ? "enabled" : "disabled"); if (p.gpsEnabled) { result.appendFormat(" GPS lat x long x alt: %f x %f x %f\n", p.gpsCoordinates[0], p.gpsCoordinates[1], p.gpsCoordinates[2]); result.appendFormat(" GPS timestamp: %lld\n", p.gpsTimestamp); result.appendFormat(" GPS processing method: %s\n", p.gpsProcessingMethod.string()); } result.append(" White balance mode: "); switch (p.wbMode) { CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_AUTO) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_INCANDESCENT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_FLUORESCENT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_DAYLIGHT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_TWILIGHT) CASE_APPEND_ENUM(ANDROID_CONTROL_AWB_MODE_SHADE) default: result.append("UNKNOWN\n"); } result.append(" Effect mode: "); switch (p.effectMode) { CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_OFF) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_MONO) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_NEGATIVE) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_SOLARIZE) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_SEPIA) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_POSTERIZE) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD) CASE_APPEND_ENUM(ANDROID_CONTROL_EFFECT_MODE_AQUA) default: result.append("UNKNOWN\n"); } result.append(" Antibanding mode: "); switch (p.antibandingMode) { CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO) CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF) CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ) CASE_APPEND_ENUM(ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ) default: result.append("UNKNOWN\n"); } result.append(" Scene mode: "); switch (p.sceneMode) { case ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED: result.append("AUTO\n"); break; CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_ACTION) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PORTRAIT) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_LANDSCAPE) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_NIGHT) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_THEATRE) 
CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_BEACH) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_SNOW) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_SUNSET) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_FIREWORKS) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_SPORTS) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_PARTY) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT) CASE_APPEND_ENUM(ANDROID_CONTROL_SCENE_MODE_BARCODE) default: result.append("UNKNOWN\n"); } result.append(" Flash mode: "); switch (p.flashMode) { CASE_APPEND_ENUM(Parameters::FLASH_MODE_OFF) CASE_APPEND_ENUM(Parameters::FLASH_MODE_AUTO) CASE_APPEND_ENUM(Parameters::FLASH_MODE_ON) CASE_APPEND_ENUM(Parameters::FLASH_MODE_TORCH) CASE_APPEND_ENUM(Parameters::FLASH_MODE_RED_EYE) CASE_APPEND_ENUM(Parameters::FLASH_MODE_INVALID) default: result.append("UNKNOWN\n"); } result.append(" Focus mode: "); switch (p.focusMode) { CASE_APPEND_ENUM(Parameters::FOCUS_MODE_AUTO) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_MACRO) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_CONTINUOUS_VIDEO) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_CONTINUOUS_PICTURE) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_EDOF) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_INFINITY) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_FIXED) CASE_APPEND_ENUM(Parameters::FOCUS_MODE_INVALID) default: result.append("UNKNOWN\n"); } result.append(" Focus state: "); switch (p.focusState) { CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_INACTIVE) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_PASSIVE_UNFOCUSED) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED) CASE_APPEND_ENUM(ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) default: result.append("UNKNOWN\n"); } result.append(" Focusing areas:\n"); for (size_t i = 0; i < p.focusingAreas.size(); i++) { result.appendFormat(" [ (%d, %d, %d, %d), weight %d ]\n", p.focusingAreas[i].left, p.focusingAreas[i].top, p.focusingAreas[i].right, p.focusingAreas[i].bottom, p.focusingAreas[i].weight); } result.appendFormat(" Exposure compensation index: %d\n", p.exposureCompensation); result.appendFormat(" AE lock %s, AWB lock %s\n", p.autoExposureLock ? "enabled" : "disabled", p.autoWhiteBalanceLock ? "enabled" : "disabled" ); result.appendFormat(" Metering areas:\n"); for (size_t i = 0; i < p.meteringAreas.size(); i++) { result.appendFormat(" [ (%d, %d, %d, %d), weight %d ]\n", p.meteringAreas[i].left, p.meteringAreas[i].top, p.meteringAreas[i].right, p.meteringAreas[i].bottom, p.meteringAreas[i].weight); } result.appendFormat(" Zoom index: %d\n", p.zoom); result.appendFormat(" Video size: %d x %d\n", p.videoWidth, p.videoHeight); result.appendFormat(" Recording hint is %s\n", p.recordingHint ? "set" : "not set"); result.appendFormat(" Video stabilization is %s\n", p.videoStabilization ? 
"enabled" : "disabled"); result.appendFormat(" Selected still capture FPS range: %d - %d\n", p.fastInfo.bestStillCaptureFpsRange[0], p.fastInfo.bestStillCaptureFpsRange[1]); result.append(" Current streams:\n"); result.appendFormat(" Preview stream ID: %d\n", getPreviewStreamId()); result.appendFormat(" Capture stream ID: %d\n", getCaptureStreamId()); result.appendFormat(" Recording stream ID: %d\n", getRecordingStreamId()); result.append(" Quirks for this camera:\n"); bool haveQuirk = false; if (p.quirks.triggerAfWithAuto) { result.appendFormat(" triggerAfWithAuto\n"); haveQuirk = true; } if (p.quirks.useZslFormat) { result.appendFormat(" useZslFormat\n"); haveQuirk = true; } if (p.quirks.meteringCropRegion) { result.appendFormat(" meteringCropRegion\n"); haveQuirk = true; } if (p.quirks.partialResults) { result.appendFormat(" usePartialResult\n"); haveQuirk = true; } if (!haveQuirk) { result.appendFormat(" none\n"); } write(fd, result.string(), result.size()); mStreamingProcessor->dump(fd, args); mCaptureSequencer->dump(fd, args); mFrameProcessor->dump(fd, args); mZslProcessor->dump(fd, args); return dumpDevice(fd, args); #undef CASE_APPEND_ENUM }
173,937
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, u32 rmr, u64 to, u32 xdr_off, int write_len, struct svc_rdma_req_map *vec) { struct ib_rdma_wr write_wr; struct ib_sge *sge; int xdr_sge_no; int sge_no; int sge_bytes; int sge_off; int bc; struct svc_rdma_op_ctxt *ctxt; if (vec->count > RPCSVC_MAXPAGES) { pr_err("svcrdma: Too many pages (%lu)\n", vec->count); return -EIO; } dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, " "write_len=%d, vec->sge=%p, vec->count=%lu\n", rmr, (unsigned long long)to, xdr_off, write_len, vec->sge, vec->count); ctxt = svc_rdma_get_context(xprt); ctxt->direction = DMA_TO_DEVICE; sge = ctxt->sge; /* Find the SGE associated with xdr_off */ for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count; xdr_sge_no++) { if (vec->sge[xdr_sge_no].iov_len > bc) break; bc -= vec->sge[xdr_sge_no].iov_len; } sge_off = bc; bc = write_len; sge_no = 0; /* Copy the remaining SGE */ while (bc != 0) { sge_bytes = min_t(size_t, bc, vec->sge[xdr_sge_no].iov_len-sge_off); sge[sge_no].length = sge_bytes; sge[sge_no].addr = dma_map_xdr(xprt, &rqstp->rq_res, xdr_off, sge_bytes, DMA_TO_DEVICE); xdr_off += sge_bytes; if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge[sge_no].addr)) goto err; svc_rdma_count_mappings(xprt, ctxt); sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey; ctxt->count++; sge_off = 0; sge_no++; xdr_sge_no++; if (xdr_sge_no > vec->count) { pr_err("svcrdma: Too many sges (%d)\n", xdr_sge_no); goto err; } bc -= sge_bytes; if (sge_no == xprt->sc_max_sge) break; } /* Prepare WRITE WR */ memset(&write_wr, 0, sizeof write_wr); ctxt->cqe.done = svc_rdma_wc_write; write_wr.wr.wr_cqe = &ctxt->cqe; write_wr.wr.sg_list = &sge[0]; write_wr.wr.num_sge = sge_no; write_wr.wr.opcode = IB_WR_RDMA_WRITE; write_wr.wr.send_flags = IB_SEND_SIGNALED; write_wr.rkey = rmr; write_wr.remote_addr = to; /* Post It */ atomic_inc(&rdma_stat_write); if (svc_rdma_send(xprt, &write_wr.wr)) goto err; return write_len - bc; err: svc_rdma_unmap_dma(ctxt); svc_rdma_put_context(ctxt, 0); return -EIO; } Commit Message: Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux Pull nfsd updates from Bruce Fields: "Another RDMA update from Chuck Lever, and a bunch of miscellaneous bugfixes" * tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits) nfsd: Fix up the "supattr_exclcreat" attributes nfsd: encoders mustn't use unitialized values in error cases nfsd: fix undefined behavior in nfsd4_layout_verify lockd: fix lockd shutdown race NFSv4: Fix callback server shutdown SUNRPC: Refactor svc_set_num_threads() NFSv4.x/callback: Create the callback service through svc_create_pooled lockd: remove redundant check on block svcrdma: Clean out old XDR encoders svcrdma: Remove the req_map cache svcrdma: Remove unused RDMA Write completion handler svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt svcrdma: Clean up RPC-over-RDMA backchannel reply processing svcrdma: Report Write/Reply chunk overruns svcrdma: Clean up RDMA_ERROR path svcrdma: Use rdma_rw API in RPC reply path svcrdma: Introduce local rdma_rw API helpers svcrdma: Clean up svc_rdma_get_inv_rkey() svcrdma: Add helper to save pages under I/O svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT ... CWE ID: CWE-404
static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma, struct svc_rdma_op_ctxt *ctxt, unsigned int sge_no, struct page *page, unsigned int offset, unsigned int len) { struct ib_device *dev = rdma->sc_cm_id->device; dma_addr_t dma_addr; dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE); if (ib_dma_mapping_error(dev, dma_addr)) return -EIO; ctxt->sge[sge_no].addr = dma_addr; ctxt->sge[sge_no].length = len; ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey; svc_rdma_count_mappings(rdma, ctxt); return 0; } /** * svc_rdma_map_reply_hdr - DMA map the transport header buffer * @rdma: controlling transport * @ctxt: op_ctxt for the Send WR * @rdma_resp: buffer containing transport header * @len: length of transport header * * Returns: * %0 if the header is DMA mapped, * %-EIO if DMA mapping failed. */ int svc_rdma_map_reply_hdr(struct svcxprt_rdma *rdma, struct svc_rdma_op_ctxt *ctxt, __be32 *rdma_resp, unsigned int len) { ctxt->direction = DMA_TO_DEVICE; ctxt->pages[0] = virt_to_page(rdma_resp); ctxt->count = 1; return svc_rdma_dma_map_page(rdma, ctxt, 0, ctxt->pages[0], 0, len); }
168,169
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id, bool ask_user, bool is_allowed) { DCHECK_CURRENTLY_ON(BrowserThread::IO); auto iter = sessions_.find(session_id); if (iter == sessions_.end()) return; Session* session = iter->second.get(); if (session->abort_requested) return; if (ask_user) { SpeechRecognitionSessionContext& context = session->context; context.label = media_stream_manager_->MakeMediaAccessRequest( context.render_process_id, context.render_frame_id, session_id, StreamControls(true, false), context.security_origin, base::BindOnce( &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback, weak_factory_.GetWeakPtr(), session_id)); return; } if (is_allowed) { base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&SpeechRecognitionManagerImpl::DispatchEvent, weak_factory_.GetWeakPtr(), session_id, EVENT_START)); } else { OnRecognitionError( session_id, blink::mojom::SpeechRecognitionError( blink::mojom::SpeechRecognitionErrorCode::kNotAllowed, blink::mojom::SpeechAudioErrorDetails::kNone)); base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&SpeechRecognitionManagerImpl::DispatchEvent, weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); } } Commit Message: Make MediaStreamDispatcherHost per-request instead of per-frame. Instead of having RenderFrameHost own a single MSDH to handle all requests from a frame, MSDH objects will be owned by a strong binding. A consequence of this is that an additional requester ID is added to requests to MediaStreamManager, so that an MSDH is able to cancel only requests generated by it. In practice, MSDH will continue to be per frame in most cases since each frame normally makes a single request for an MSDH object. This fixes a lifetime issue caused by the IO thread executing tasks after the RenderFrameHost dies. Drive-by: Fix some minor lint issues. Bug: 912520 Change-Id: I52742ffc98b9fc57ce8e6f5093a61aed86d3e516 Reviewed-on: https://chromium-review.googlesource.com/c/1369799 Reviewed-by: Emircan Uysaler <[email protected]> Reviewed-by: Ken Buchanan <[email protected]> Reviewed-by: Olga Sharonova <[email protected]> Commit-Queue: Guido Urdaneta <[email protected]> Cr-Commit-Position: refs/heads/master@{#616347} CWE ID: CWE-189
void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id, bool ask_user, bool is_allowed) { DCHECK_CURRENTLY_ON(BrowserThread::IO); auto iter = sessions_.find(session_id); if (iter == sessions_.end()) return; Session* session = iter->second.get(); if (session->abort_requested) return; if (ask_user) { SpeechRecognitionSessionContext& context = session->context; context.label = media_stream_manager_->MakeMediaAccessRequest( context.render_process_id, context.render_frame_id, requester_id_, session_id, StreamControls(true, false), context.security_origin, base::BindOnce( &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback, weak_factory_.GetWeakPtr(), session_id)); return; } if (is_allowed) { base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&SpeechRecognitionManagerImpl::DispatchEvent, weak_factory_.GetWeakPtr(), session_id, EVENT_START)); } else { OnRecognitionError( session_id, blink::mojom::SpeechRecognitionError( blink::mojom::SpeechRecognitionErrorCode::kNotAllowed, blink::mojom::SpeechAudioErrorDetails::kNone)); base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&SpeechRecognitionManagerImpl::DispatchEvent, weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); } }
173,110
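The requester_id added throughout this change lets each dispatcher cancel only the requests it created, rather than cancelling per frame and racing with other owners. A generic, self-contained C++ sketch of that scoping idea is shown below — a registry keyed by (requester, session) so that tearing down one requester cannot touch another's entries. The class and member names are invented for the sketch and are not Chromium APIs.

    #include <map>
    #include <string>
    #include <utility>

    // Requests are keyed by (requester_id, session_id); tearing down one
    // requester removes only the entries that requester registered.
    class RequestRegistry {
     public:
      void Add(int requester_id, int session_id, std::string label) {
        requests_[{requester_id, session_id}] = std::move(label);
      }
      void CancelAllForRequester(int requester_id) {
        for (auto it = requests_.begin(); it != requests_.end();) {
          if (it->first.first == requester_id)
            it = requests_.erase(it);
          else
            ++it;
        }
      }
     private:
      std::map<std::pair<int, int>, std::string> requests_;
    };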
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void * calloc(size_t n, size_t lb) { if (lb && n > SIZE_MAX / lb) return NULL; # if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ /* libpthread allocated some memory that is only pointed to by */ /* mmapped thread stacks. Make sure it's not collectable. */ { static GC_bool lib_bounds_set = FALSE; ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (!EXPECT(lib_bounds_set, TRUE)) { GC_init_lib_bounds(); lib_bounds_set = TRUE; } if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) return GC_malloc_uncollectable(n*lb); /* The two ranges are actually usually adjacent, so there may */ /* be a way to speed this up. */ } # endif return((void *)REDIRECT_MALLOC(n*lb)); } Commit Message: Fix calloc-related code to prevent SIZE_MAX redefinition in sys headers * malloc.c: Include limits.h for SIZE_MAX. * malloc.c (SIZE_MAX, calloc): Define GC_SIZE_MAX instead of SIZE_MAX. CWE ID: CWE-189
void * calloc(size_t n, size_t lb) { if (lb && n > GC_SIZE_MAX / lb) return NULL; # if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ /* libpthread allocated some memory that is only pointed to by */ /* mmapped thread stacks. Make sure it's not collectable. */ { static GC_bool lib_bounds_set = FALSE; ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (!EXPECT(lib_bounds_set, TRUE)) { GC_init_lib_bounds(); lib_bounds_set = TRUE; } if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) return GC_malloc_uncollectable(n*lb); /* The two ranges are actually usually adjacent, so there may */ /* be a way to speed this up. */ } # endif return((void *)REDIRECT_MALLOC(n*lb)); }
169,880
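The guard at the top of this calloc wrapper — rejecting the call when n is greater than MAX divided by lb — is the standard way to detect that n * lb would wrap before the multiplication is ever performed, which matters here because the wrapper multiplies the sizes itself before handing the product to the underlying allocator. A tiny C++ illustration of the same check, with an invented function name:

    #include <cstddef>
    #include <cstdlib>
    #include <limits>

    // Overflow-checked array allocation: returns nullptr instead of silently
    // allocating a wrapped-around (too small) block.
    void* checkedAllocArray(std::size_t n, std::size_t elemSize) {
        if (elemSize != 0 && n > std::numeric_limits<std::size_t>::max() / elemSize)
            return nullptr;                   // n * elemSize would wrap
        return std::malloc(n * elemSize);     // product is now known to fit
    }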
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int spl_filesystem_file_read_csv(spl_filesystem_object *intern, char delimiter, char enclosure, char escape, zval *return_value TSRMLS_DC) /* {{{ */ { int ret = SUCCESS; do { ret = spl_filesystem_file_read(intern, 1 TSRMLS_CC); } while (ret == SUCCESS && !intern->u.file.current_line_len && SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_SKIP_EMPTY)); if (ret == SUCCESS) { size_t buf_len = intern->u.file.current_line_len; char *buf = estrndup(intern->u.file.current_line, buf_len); if (intern->u.file.current_zval) { zval_ptr_dtor(&intern->u.file.current_zval); } ALLOC_INIT_ZVAL(intern->u.file.current_zval); php_fgetcsv(intern->u.file.stream, delimiter, enclosure, escape, buf_len, buf, intern->u.file.current_zval TSRMLS_CC); if (return_value) { if (Z_TYPE_P(return_value) != IS_NULL) { zval_dtor(return_value); ZVAL_NULL(return_value); } ZVAL_ZVAL(return_value, intern->u.file.current_zval, 1, 0); } } return ret; } /* }}} */ Commit Message: Fix bug #72262 - do not overflow int CWE ID: CWE-190
static int spl_filesystem_file_read_csv(spl_filesystem_object *intern, char delimiter, char enclosure, char escape, zval *return_value TSRMLS_DC) /* {{{ */ { int ret = SUCCESS; do { ret = spl_filesystem_file_read(intern, 1 TSRMLS_CC); } while (ret == SUCCESS && !intern->u.file.current_line_len && SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_SKIP_EMPTY)); if (ret == SUCCESS) { size_t buf_len = intern->u.file.current_line_len; char *buf = estrndup(intern->u.file.current_line, buf_len); if (intern->u.file.current_zval) { zval_ptr_dtor(&intern->u.file.current_zval); } ALLOC_INIT_ZVAL(intern->u.file.current_zval); php_fgetcsv(intern->u.file.stream, delimiter, enclosure, escape, buf_len, buf, intern->u.file.current_zval TSRMLS_CC); if (return_value) { if (Z_TYPE_P(return_value) != IS_NULL) { zval_dtor(return_value); ZVAL_NULL(return_value); } ZVAL_ZVAL(return_value, intern->u.file.current_zval, 1, 0); } } return ret; } /* }}} */
167,077
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int on_http_message_complete(http_parser* parser) { struct clt_info *info = parser->data; ws_svr *svr = ws_svr_from_ses(info->ses); info->request->version_major = parser->http_major; info->request->version_minor = parser->http_minor; info->request->method = parser->method; dict_entry *entry; dict_iterator *iter = dict_get_iterator(info->request->headers); while ((entry = dict_next(iter)) != NULL) { log_trace("Header: %s: %s", (char *)entry->key, (char *)entry->val); } dict_release_iterator(iter); if (info->request->method != HTTP_GET) goto error; if (http_request_get_header(info->request, "Host") == NULL) goto error; double version = info->request->version_major + info->request->version_minor * 0.1; if (version < 1.1) goto error; const char *upgrade = http_request_get_header(info->request, "Upgrade"); if (upgrade == NULL || strcasecmp(upgrade, "websocket") != 0) goto error; const char *connection = http_request_get_header(info->request, "Connection"); if (connection == NULL) goto error; else { bool found_upgrade = false; int count; sds *tokens = sdssplitlen(connection, strlen(connection), ",", 1, &count); if (tokens == NULL) goto error; for (int i = 0; i < count; i++) { sds token = tokens[i]; sdstrim(token, " "); if (strcasecmp(token, "Upgrade") == 0) { found_upgrade = true; break; } } sdsfreesplitres(tokens, count); if (!found_upgrade) goto error; } const char *ws_version = http_request_get_header(info->request, "Sec-WebSocket-Version"); if (ws_version == NULL || strcmp(ws_version, "13") != 0) goto error; const char *ws_key = http_request_get_header(info->request, "Sec-WebSocket-Key"); if (ws_key == NULL) goto error; const char *protocol_list = http_request_get_header(info->request, "Sec-WebSocket-Protocol"); if (protocol_list && !is_good_protocol(protocol_list, svr->protocol)) goto error; if (strlen(svr->origin) > 0) { const char *origin = http_request_get_header(info->request, "Origin"); if (origin == NULL || !is_good_origin(origin, svr->origin)) goto error; } if (svr->type.on_privdata_alloc) { info->privdata = svr->type.on_privdata_alloc(svr); if (info->privdata == NULL) goto error; } info->upgrade = true; info->remote = sdsnew(http_get_remote_ip(info->ses, info->request)); info->url = sdsnew(info->request->url); if (svr->type.on_upgrade) { svr->type.on_upgrade(info->ses, info->remote); } if (protocol_list) { send_hand_shake_reply(info->ses, svr->protocol, ws_key); } else { send_hand_shake_reply(info->ses, NULL, ws_key); } return 0; error: ws_svr_close_clt(ws_svr_from_ses(info->ses), info->ses); return -1; } Commit Message: Merge pull request #131 from benjaminchodroff/master fix memory corruption and other 32bit overflows CWE ID: CWE-190
static int on_http_message_complete(http_parser* parser) { struct clt_info *info = parser->data; ws_svr *svr = ws_svr_from_ses(info->ses); info->request->version_major = parser->http_major; info->request->version_minor = parser->http_minor; info->request->method = parser->method; dict_entry *entry; dict_iterator *iter = dict_get_iterator(info->request->headers); while ((entry = dict_next(iter)) != NULL) { log_trace("Header: %s: %s", (char *)entry->key, (char *)entry->val); } dict_release_iterator(iter); if (info->request->method != HTTP_GET) goto error; if (http_request_get_header(info->request, "Host") == NULL) goto error; double version = info->request->version_major + info->request->version_minor * 0.1; if (version < 1.1) goto error; const char *upgrade = http_request_get_header(info->request, "Upgrade"); if (upgrade == NULL || strcasecmp(upgrade, "websocket") != 0) goto error; const char *connection = http_request_get_header(info->request, "Connection"); if (connection == NULL || strlen(connection) > UT_WS_SVR_MAX_HEADER_SIZE) goto error; else { bool found_upgrade = false; int count; sds *tokens = sdssplitlen(connection, strlen(connection), ",", 1, &count); if (tokens == NULL) goto error; for (int i = 0; i < count; i++) { sds token = tokens[i]; sdstrim(token, " "); if (strcasecmp(token, "Upgrade") == 0) { found_upgrade = true; break; } } sdsfreesplitres(tokens, count); if (!found_upgrade) goto error; } const char *ws_version = http_request_get_header(info->request, "Sec-WebSocket-Version"); if (ws_version == NULL || strcmp(ws_version, "13") != 0) goto error; const char *ws_key = http_request_get_header(info->request, "Sec-WebSocket-Key"); if (ws_key == NULL) goto error; const char *protocol_list = http_request_get_header(info->request, "Sec-WebSocket-Protocol"); if (protocol_list && !is_good_protocol(protocol_list, svr->protocol)) goto error; if (strlen(svr->origin) > 0) { const char *origin = http_request_get_header(info->request, "Origin"); if (origin == NULL || !is_good_origin(origin, svr->origin)) goto error; } if (svr->type.on_privdata_alloc) { info->privdata = svr->type.on_privdata_alloc(svr); if (info->privdata == NULL) goto error; } info->upgrade = true; info->remote = sdsnew(http_get_remote_ip(info->ses, info->request)); info->url = sdsnew(info->request->url); if (svr->type.on_upgrade) { svr->type.on_upgrade(info->ses, info->remote); } if (protocol_list) { send_hand_shake_reply(info->ses, svr->protocol, ws_key); } else { send_hand_shake_reply(info->ses, NULL, ws_key); } return 0; error: ws_svr_close_clt(ws_svr_from_ses(info->ses), info->ses); return -1; }
169,018
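The added strlen(connection) > UT_WS_SVR_MAX_HEADER_SIZE test is an input-length cap applied before the header value is copied and tokenized, so a hostile client cannot drive the splitter with an arbitrarily large value. A small C++ sketch of validating a header's length before parsing it; the cap constant and function name are made up for illustration.

    #include <cstring>
    #include <sstream>
    #include <string>
    #include <vector>

    static const std::size_t kMaxHeaderValueLen = 8192;  // illustrative cap

    // Splits a comma-separated header value, but only after checking its length,
    // so oversized input is rejected instead of being copied and tokenized.
    bool splitHeaderValue(const char* value, std::vector<std::string>* out) {
        if (value == nullptr || std::strlen(value) > kMaxHeaderValueLen)
            return false;
        std::string s(value);
        std::istringstream ss(s);
        std::string token;
        while (std::getline(ss, token, ','))
            out->push_back(token);
        return true;
    }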
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, const u8 *addr, gfp_t gfp) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct timespec uptime; struct ieee80211_tx_latency_bin_ranges *tx_latency; int i; sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp); if (!sta) return NULL; rcu_read_lock(); tx_latency = rcu_dereference(local->tx_latency); /* init stations Tx latency statistics && TID bins */ if (tx_latency) { sta->tx_lat = kzalloc(IEEE80211_NUM_TIDS * sizeof(struct ieee80211_tx_latency_stat), GFP_ATOMIC); if (!sta->tx_lat) { rcu_read_unlock(); goto free; } if (tx_latency->n_ranges) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) { /* size of bins is size of the ranges +1 */ sta->tx_lat[i].bin_count = tx_latency->n_ranges + 1; sta->tx_lat[i].bins = kcalloc(sta->tx_lat[i].bin_count, sizeof(u32), GFP_ATOMIC); if (!sta->tx_lat[i].bins) { rcu_read_unlock(); goto free; } } } } rcu_read_unlock(); spin_lock_init(&sta->lock); INIT_WORK(&sta->drv_unblock_wk, sta_unblock); INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); mutex_init(&sta->ampdu_mlme.mtx); #ifdef CONFIG_MAC80211_MESH if (ieee80211_vif_is_mesh(&sdata->vif) && !sdata->u.mesh.user_mpm) init_timer(&sta->plink_timer); sta->nonpeer_pm = NL80211_MESH_POWER_ACTIVE; #endif memcpy(sta->sta.addr, addr, ETH_ALEN); sta->local = local; sta->sdata = sdata; sta->last_rx = jiffies; sta->sta_state = IEEE80211_STA_NONE; do_posix_clock_monotonic_gettime(&uptime); sta->last_connected = uptime.tv_sec; ewma_init(&sta->avg_signal, 1024, 8); for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++) ewma_init(&sta->chain_signal_avg[i], 1024, 8); if (sta_prepare_rate_control(local, sta, gfp)) goto free; for (i = 0; i < IEEE80211_NUM_TIDS; i++) { /* * timer_to_tid must be initialized with identity mapping * to enable session_timer's data differentiation. See * sta_rx_agg_session_timer_expired for usage. */ sta->timer_to_tid[i] = i; } for (i = 0; i < IEEE80211_NUM_ACS; i++) { skb_queue_head_init(&sta->ps_tx_buf[i]); skb_queue_head_init(&sta->tx_filtered[i]); } for (i = 0; i < IEEE80211_NUM_TIDS; i++) sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); sta->sta.smps_mode = IEEE80211_SMPS_OFF; if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { struct ieee80211_supported_band *sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; u8 smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> IEEE80211_HT_CAP_SM_PS_SHIFT; /* * Assume that hostapd advertises our caps in the beacon and * this is the known_smps_mode for a station that just assciated */ switch (smps) { case WLAN_HT_SMPS_CONTROL_DISABLED: sta->known_smps_mode = IEEE80211_SMPS_OFF; break; case WLAN_HT_SMPS_CONTROL_STATIC: sta->known_smps_mode = IEEE80211_SMPS_STATIC; break; case WLAN_HT_SMPS_CONTROL_DYNAMIC: sta->known_smps_mode = IEEE80211_SMPS_DYNAMIC; break; default: WARN_ON(1); } } sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); return sta; free: if (sta->tx_lat) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) kfree(sta->tx_lat[i].bins); kfree(sta->tx_lat); } kfree(sta); return NULL; } Commit Message: mac80211: fix AP powersave TX vs. wakeup race There is a race between the TX path and the STA wakeup: while a station is sleeping, mac80211 buffers frames until it wakes up, then the frames are transmitted. However, the RX and TX path are concurrent, so the packet indicating wakeup can be processed while a packet is being transmitted. 
This can lead to a situation where the buffered frames list is emptied on the one side, while a frame is being added on the other side, as the station is still seen as sleeping in the TX path. As a result, the newly added frame will not be send anytime soon. It might be sent much later (and out of order) when the station goes to sleep and wakes up the next time. Additionally, it can lead to the crash below. Fix all this by synchronising both paths with a new lock. Both path are not fastpath since they handle PS situations. In a later patch we'll remove the extra skb queue locks to reduce locking overhead. BUG: unable to handle kernel NULL pointer dereference at 000000b0 IP: [<ff6f1791>] ieee80211_report_used_skb+0x11/0x3e0 [mac80211] *pde = 00000000 Oops: 0000 [#1] SMP DEBUG_PAGEALLOC EIP: 0060:[<ff6f1791>] EFLAGS: 00210282 CPU: 1 EIP is at ieee80211_report_used_skb+0x11/0x3e0 [mac80211] EAX: e5900da0 EBX: 00000000 ECX: 00000001 EDX: 00000000 ESI: e41d00c0 EDI: e5900da0 EBP: ebe458e4 ESP: ebe458b0 DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 CR0: 8005003b CR2: 000000b0 CR3: 25a78000 CR4: 000407d0 DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000 DR6: ffff0ff0 DR7: 00000400 Process iperf (pid: 3934, ti=ebe44000 task=e757c0b0 task.ti=ebe44000) iwlwifi 0000:02:00.0: I iwl_pcie_enqueue_hcmd Sending command LQ_CMD (#4e), seq: 0x0903, 92 bytes at 3[3]:9 Stack: e403b32c ebe458c4 00200002 00200286 e403b338 ebe458cc c10960bb e5900da0 ff76a6ec ebe458d8 00000000 e41d00c0 e5900da0 ebe458f0 ff6f1b75 e403b210 ebe4598c ff723dc1 00000000 ff76a6ec e597c978 e403b758 00000002 00000002 Call Trace: [<ff6f1b75>] ieee80211_free_txskb+0x15/0x20 [mac80211] [<ff723dc1>] invoke_tx_handlers+0x1661/0x1780 [mac80211] [<ff7248a5>] ieee80211_tx+0x75/0x100 [mac80211] [<ff7249bf>] ieee80211_xmit+0x8f/0xc0 [mac80211] [<ff72550e>] ieee80211_subif_start_xmit+0x4fe/0xe20 [mac80211] [<c149ef70>] dev_hard_start_xmit+0x450/0x950 [<c14b9aa9>] sch_direct_xmit+0xa9/0x250 [<c14b9c9b>] __qdisc_run+0x4b/0x150 [<c149f732>] dev_queue_xmit+0x2c2/0xca0 Cc: [email protected] Reported-by: Yaara Rozenblum <[email protected]> Signed-off-by: Emmanuel Grumbach <[email protected]> Reviewed-by: Stanislaw Gruszka <[email protected]> [reword commit log, use a separate lock] Signed-off-by: Johannes Berg <[email protected]> CWE ID: CWE-362
struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, const u8 *addr, gfp_t gfp) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct timespec uptime; struct ieee80211_tx_latency_bin_ranges *tx_latency; int i; sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp); if (!sta) return NULL; rcu_read_lock(); tx_latency = rcu_dereference(local->tx_latency); /* init stations Tx latency statistics && TID bins */ if (tx_latency) { sta->tx_lat = kzalloc(IEEE80211_NUM_TIDS * sizeof(struct ieee80211_tx_latency_stat), GFP_ATOMIC); if (!sta->tx_lat) { rcu_read_unlock(); goto free; } if (tx_latency->n_ranges) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) { /* size of bins is size of the ranges +1 */ sta->tx_lat[i].bin_count = tx_latency->n_ranges + 1; sta->tx_lat[i].bins = kcalloc(sta->tx_lat[i].bin_count, sizeof(u32), GFP_ATOMIC); if (!sta->tx_lat[i].bins) { rcu_read_unlock(); goto free; } } } } rcu_read_unlock(); spin_lock_init(&sta->lock); spin_lock_init(&sta->ps_lock); INIT_WORK(&sta->drv_unblock_wk, sta_unblock); INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); mutex_init(&sta->ampdu_mlme.mtx); #ifdef CONFIG_MAC80211_MESH if (ieee80211_vif_is_mesh(&sdata->vif) && !sdata->u.mesh.user_mpm) init_timer(&sta->plink_timer); sta->nonpeer_pm = NL80211_MESH_POWER_ACTIVE; #endif memcpy(sta->sta.addr, addr, ETH_ALEN); sta->local = local; sta->sdata = sdata; sta->last_rx = jiffies; sta->sta_state = IEEE80211_STA_NONE; do_posix_clock_monotonic_gettime(&uptime); sta->last_connected = uptime.tv_sec; ewma_init(&sta->avg_signal, 1024, 8); for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++) ewma_init(&sta->chain_signal_avg[i], 1024, 8); if (sta_prepare_rate_control(local, sta, gfp)) goto free; for (i = 0; i < IEEE80211_NUM_TIDS; i++) { /* * timer_to_tid must be initialized with identity mapping * to enable session_timer's data differentiation. See * sta_rx_agg_session_timer_expired for usage. */ sta->timer_to_tid[i] = i; } for (i = 0; i < IEEE80211_NUM_ACS; i++) { skb_queue_head_init(&sta->ps_tx_buf[i]); skb_queue_head_init(&sta->tx_filtered[i]); } for (i = 0; i < IEEE80211_NUM_TIDS; i++) sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); sta->sta.smps_mode = IEEE80211_SMPS_OFF; if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { struct ieee80211_supported_band *sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; u8 smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> IEEE80211_HT_CAP_SM_PS_SHIFT; /* * Assume that hostapd advertises our caps in the beacon and * this is the known_smps_mode for a station that just assciated */ switch (smps) { case WLAN_HT_SMPS_CONTROL_DISABLED: sta->known_smps_mode = IEEE80211_SMPS_OFF; break; case WLAN_HT_SMPS_CONTROL_STATIC: sta->known_smps_mode = IEEE80211_SMPS_STATIC; break; case WLAN_HT_SMPS_CONTROL_DYNAMIC: sta->known_smps_mode = IEEE80211_SMPS_DYNAMIC; break; default: WARN_ON(1); } } sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr); return sta; free: if (sta->tx_lat) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) kfree(sta->tx_lat[i].bins); kfree(sta->tx_lat); } kfree(sta); return NULL; }
166,392
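The commit message in this row describes two concurrent paths touching the same buffered-frame list: the wakeup path drains it while the TX path appends to it, and without a shared lock a frame appended during the drain can be stranded or the list corrupted. The fix introduces ps_lock and takes it on both sides. Below is a user-space C++ analogue of that pattern, with std::mutex standing in for the kernel spinlock; the station and frame types are invented for the sketch and do not correspond to mac80211 structures.

    #include <deque>
    #include <mutex>
    #include <string>
    #include <vector>

    struct Frame { std::string payload; };

    class Station {
     public:
      // TX path: either buffer the frame (station asleep) or send immediately.
      void Transmit(Frame f) {
        std::lock_guard<std::mutex> lock(ps_lock_);
        if (sleeping_)
          buffered_.push_back(std::move(f));
        else
          SendNow(f);
      }
      // Wakeup path: flip the state and drain the buffer under the same lock, so
      // a concurrent Transmit() cannot slip a frame in after the drain decision.
      std::vector<Frame> WakeUp() {
        std::lock_guard<std::mutex> lock(ps_lock_);
        sleeping_ = false;
        std::vector<Frame> drained(buffered_.begin(), buffered_.end());
        buffered_.clear();
        return drained;
      }
     private:
      void SendNow(const Frame&) { /* hand off to the driver in a real stack */ }
      bool sleeping_ = true;
      std::deque<Frame> buffered_;
      std::mutex ps_lock_;
    };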
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool RenderViewHostManager::ShouldSwapProcessesForNavigation( const NavigationEntry* curr_entry, const NavigationEntryImpl* new_entry) const { DCHECK(new_entry); const GURL& current_url = (curr_entry) ? curr_entry->GetURL() : render_view_host_->GetSiteInstance()->GetSiteURL(); BrowserContext* browser_context = delegate_->GetControllerForRenderManager().GetBrowserContext(); if (WebUIControllerFactoryRegistry::GetInstance()->UseWebUIForURL( browser_context, current_url)) { if (!WebUIControllerFactoryRegistry::GetInstance()->IsURLAcceptableForWebUI( browser_context, new_entry->GetURL(), false)) { return true; } } else { if (WebUIControllerFactoryRegistry::GetInstance()->UseWebUIForURL( browser_context, new_entry->GetURL())) { return true; } } if (GetContentClient()->browser()->ShouldSwapProcessesForNavigation( curr_entry ? curr_entry->GetURL() : GURL(), new_entry->GetURL())) { return true; } if (!curr_entry) return false; if (curr_entry->IsViewSourceMode() != new_entry->IsViewSourceMode()) return true; return false; } Commit Message: Ensure extensions and the Chrome Web Store are loaded in new BrowsingInstances. BUG=174943 TEST=Can't post message to CWS. See bug for repro steps. Review URL: https://chromiumcodereview.appspot.com/12301013 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@184208 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-264
bool RenderViewHostManager::ShouldSwapProcessesForNavigation( const NavigationEntry* curr_entry, const NavigationEntryImpl* new_entry) const { DCHECK(new_entry); const GURL& current_url = (curr_entry) ? curr_entry->GetURL() : render_view_host_->GetSiteInstance()->GetSiteURL(); BrowserContext* browser_context = delegate_->GetControllerForRenderManager().GetBrowserContext(); if (WebUIControllerFactoryRegistry::GetInstance()->UseWebUIForURL( browser_context, current_url)) { if (!WebUIControllerFactoryRegistry::GetInstance()->IsURLAcceptableForWebUI( browser_context, new_entry->GetURL(), false)) { return true; } } else { if (WebUIControllerFactoryRegistry::GetInstance()->UseWebUIForURL( browser_context, new_entry->GetURL())) { return true; } } if (GetContentClient()->browser()->ShouldSwapProcessesForNavigation( render_view_host_->GetSiteInstance(), curr_entry ? curr_entry->GetURL() : GURL(), new_entry->GetURL())) { return true; } if (!curr_entry) return false; if (curr_entry->IsViewSourceMode() != new_entry->IsViewSourceMode()) return true; return false; }
171,437
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a, ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey) { EVP_MD_CTX ctx; unsigned char *buf_in=NULL; int ret= -1,inl; int mdnid, pknid; if (!pkey) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY, ERR_R_PASSED_NULL_PARAMETER); return -1; } if (signature->type == V_ASN1_BIT_STRING && signature->flags & 0x7) { ASN1err(ASN1_F_ASN1_VERIFY, ASN1_R_INVALID_BIT_STRING_BITS_LEFT); return -1; } EVP_MD_CTX_init(&ctx); /* Convert signature OID into digest and public key OIDs */ if (!OBJ_find_sigid_algs(OBJ_obj2nid(a->algorithm), &mdnid, &pknid)) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM); goto err; } if (mdnid == NID_undef) { if (!pkey->ameth || !pkey->ameth->item_verify) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM); goto err; } ret = pkey->ameth->item_verify(&ctx, it, asn, a, signature, pkey); /* Return value of 2 means carry on, anything else means we * exit straight away: either a fatal error of the underlying * verification routine handles all verification. */ if (ret != 2) goto err; ret = -1; } else { const EVP_MD *type; type=EVP_get_digestbynid(mdnid); if (type == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM); goto err; } /* Check public key OID matches public key type */ if (EVP_PKEY_type(pknid) != pkey->ameth->pkey_id) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_WRONG_PUBLIC_KEY_TYPE); goto err; } if (!EVP_DigestVerifyInit(&ctx, NULL, type, NULL, pkey)) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } } inl = ASN1_item_i2d(asn, &buf_in, it); if (buf_in == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_MALLOC_FAILURE); goto err; } ret = EVP_DigestVerifyUpdate(&ctx,buf_in,inl); OPENSSL_cleanse(buf_in,(unsigned int)inl); OPENSSL_free(buf_in); if (!ret) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); goto err; } ret = -1; if (EVP_DigestVerifyFinal(&ctx,signature->data, (size_t)signature->length) <= 0) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } /* we don't need to zero the 'ctx' because we just checked * public information */ /* memset(&ctx,0,sizeof(ctx)); */ ret=1; err: EVP_MD_CTX_cleanup(&ctx); return(ret); } Commit Message: use correct function name Reviewed-by: Rich Salz <[email protected]> Reviewed-by: Matt Caswell <[email protected]> CWE ID: CWE-310
int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a, ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey) { EVP_MD_CTX ctx; unsigned char *buf_in=NULL; int ret= -1,inl; int mdnid, pknid; if (!pkey) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY, ERR_R_PASSED_NULL_PARAMETER); return -1; } if (signature->type == V_ASN1_BIT_STRING && signature->flags & 0x7) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY, ASN1_R_INVALID_BIT_STRING_BITS_LEFT); return -1; } EVP_MD_CTX_init(&ctx); /* Convert signature OID into digest and public key OIDs */ if (!OBJ_find_sigid_algs(OBJ_obj2nid(a->algorithm), &mdnid, &pknid)) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM); goto err; } if (mdnid == NID_undef) { if (!pkey->ameth || !pkey->ameth->item_verify) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_SIGNATURE_ALGORITHM); goto err; } ret = pkey->ameth->item_verify(&ctx, it, asn, a, signature, pkey); /* Return value of 2 means carry on, anything else means we * exit straight away: either a fatal error of the underlying * verification routine handles all verification. */ if (ret != 2) goto err; ret = -1; } else { const EVP_MD *type; type=EVP_get_digestbynid(mdnid); if (type == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_UNKNOWN_MESSAGE_DIGEST_ALGORITHM); goto err; } /* Check public key OID matches public key type */ if (EVP_PKEY_type(pknid) != pkey->ameth->pkey_id) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ASN1_R_WRONG_PUBLIC_KEY_TYPE); goto err; } if (!EVP_DigestVerifyInit(&ctx, NULL, type, NULL, pkey)) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } } inl = ASN1_item_i2d(asn, &buf_in, it); if (buf_in == NULL) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_MALLOC_FAILURE); goto err; } ret = EVP_DigestVerifyUpdate(&ctx,buf_in,inl); OPENSSL_cleanse(buf_in,(unsigned int)inl); OPENSSL_free(buf_in); if (!ret) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); goto err; } ret = -1; if (EVP_DigestVerifyFinal(&ctx,signature->data, (size_t)signature->length) <= 0) { ASN1err(ASN1_F_ASN1_ITEM_VERIFY,ERR_R_EVP_LIB); ret=0; goto err; } /* we don't need to zero the 'ctx' because we just checked * public information */ /* memset(&ctx,0,sizeof(ctx)); */ ret=1; err: EVP_MD_CTX_cleanup(&ctx); return(ret); }
166,793
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int ext4_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); int error, rc = 0; int orphan = 0; const unsigned int ia_valid = attr->ia_valid; error = inode_change_ok(inode, attr); if (error) return error; if (is_quota_modification(inode, attr)) { error = dquot_initialize(inode); if (error) return error; } if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { handle_t *handle; /* (user+group)*(old+new) structure, inode write (sb, * inode block, ? - but truncate inode update has it) */ handle = ext4_journal_start(inode, EXT4_HT_QUOTA, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } error = dquot_transfer(inode, attr); if (error) { ext4_journal_stop(handle); return error; } /* Update corresponding info in inode so that everything is in * one transaction */ if (attr->ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; error = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); } if (attr->ia_valid & ATTR_SIZE) { handle_t *handle; loff_t oldsize = inode->i_size; int shrink = (attr->ia_size <= inode->i_size); if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (attr->ia_size > sbi->s_bitmap_maxbytes) return -EFBIG; } if (!S_ISREG(inode->i_mode)) return -EINVAL; if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size) inode_inc_iversion(inode); if (ext4_should_order_data(inode) && (attr->ia_size < inode->i_size)) { error = ext4_begin_ordered_truncate(inode, attr->ia_size); if (error) goto err_out; } if (attr->ia_size != inode->i_size) { handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } if (ext4_handle_valid(handle) && shrink) { error = ext4_orphan_add(handle, inode); orphan = 1; } /* * Update c/mtime on truncate up, ext4_truncate() will * update c/mtime in shrink case below */ if (!shrink) { inode->i_mtime = ext4_current_time(inode); inode->i_ctime = inode->i_mtime; } down_write(&EXT4_I(inode)->i_data_sem); EXT4_I(inode)->i_disksize = attr->ia_size; rc = ext4_mark_inode_dirty(handle, inode); if (!error) error = rc; /* * We have to update i_size under i_data_sem together * with i_disksize to avoid races with writeback code * running ext4_wb_update_i_disksize(). */ if (!error) i_size_write(inode, attr->ia_size); up_write(&EXT4_I(inode)->i_data_sem); ext4_journal_stop(handle); if (error) { if (orphan) ext4_orphan_del(NULL, inode); goto err_out; } } if (!shrink) pagecache_isize_extended(inode, oldsize, inode->i_size); /* * Blocks are going to be removed from the inode. Wait * for dio in flight. Temporarily disable * dioread_nolock to prevent livelock. */ if (orphan) { if (!ext4_should_journal_data(inode)) { ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); ext4_inode_resume_unlocked_dio(inode); } else ext4_wait_for_tail_page_commit(inode); } /* * Truncate pagecache after we've waited for commit * in data=journal mode to make pages freeable. */ truncate_pagecache(inode, inode->i_size); if (shrink) ext4_truncate(inode); } if (!rc) { setattr_copy(inode, attr); mark_inode_dirty(inode); } /* * If the call to ext4_truncate failed to get a transaction handle at * all, we need to clean up the in-core orphan list manually. 
*/ if (orphan && inode->i_nlink) ext4_orphan_del(NULL, inode); if (!rc && (ia_valid & ATTR_MODE)) rc = posix_acl_chmod(inode, inode->i_mode); err_out: ext4_std_error(inode->i_sb, error); if (!error) error = rc; return error; } Commit Message: ext4: fix races between page faults and hole punching Currently, page faults and hole punching are completely unsynchronized. This can result in page fault faulting in a page into a range that we are punching after truncate_pagecache_range() has been called and thus we can end up with a page mapped to disk blocks that will be shortly freed. Filesystem corruption will shortly follow. Note that the same race is avoided for truncate by checking page fault offset against i_size but there isn't similar mechanism available for punching holes. Fix the problem by creating new rw semaphore i_mmap_sem in inode and grab it for writing over truncate, hole punching, and other functions removing blocks from extent tree and for read over page faults. We cannot easily use i_data_sem for this since that ranks below transaction start and we need something ranking above it so that it can be held over the whole truncate / hole punching operation. Also remove various workarounds we had in the code to reduce race window when page fault could have created pages with stale mapping information. Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]> CWE ID: CWE-362
int ext4_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); int error, rc = 0; int orphan = 0; const unsigned int ia_valid = attr->ia_valid; error = inode_change_ok(inode, attr); if (error) return error; if (is_quota_modification(inode, attr)) { error = dquot_initialize(inode); if (error) return error; } if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { handle_t *handle; /* (user+group)*(old+new) structure, inode write (sb, * inode block, ? - but truncate inode update has it) */ handle = ext4_journal_start(inode, EXT4_HT_QUOTA, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } error = dquot_transfer(inode, attr); if (error) { ext4_journal_stop(handle); return error; } /* Update corresponding info in inode so that everything is in * one transaction */ if (attr->ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; error = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); } if (attr->ia_valid & ATTR_SIZE) { handle_t *handle; loff_t oldsize = inode->i_size; int shrink = (attr->ia_size <= inode->i_size); if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (attr->ia_size > sbi->s_bitmap_maxbytes) return -EFBIG; } if (!S_ISREG(inode->i_mode)) return -EINVAL; if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size) inode_inc_iversion(inode); if (ext4_should_order_data(inode) && (attr->ia_size < inode->i_size)) { error = ext4_begin_ordered_truncate(inode, attr->ia_size); if (error) goto err_out; } if (attr->ia_size != inode->i_size) { handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); if (IS_ERR(handle)) { error = PTR_ERR(handle); goto err_out; } if (ext4_handle_valid(handle) && shrink) { error = ext4_orphan_add(handle, inode); orphan = 1; } /* * Update c/mtime on truncate up, ext4_truncate() will * update c/mtime in shrink case below */ if (!shrink) { inode->i_mtime = ext4_current_time(inode); inode->i_ctime = inode->i_mtime; } down_write(&EXT4_I(inode)->i_data_sem); EXT4_I(inode)->i_disksize = attr->ia_size; rc = ext4_mark_inode_dirty(handle, inode); if (!error) error = rc; /* * We have to update i_size under i_data_sem together * with i_disksize to avoid races with writeback code * running ext4_wb_update_i_disksize(). */ if (!error) i_size_write(inode, attr->ia_size); up_write(&EXT4_I(inode)->i_data_sem); ext4_journal_stop(handle); if (error) { if (orphan) ext4_orphan_del(NULL, inode); goto err_out; } } if (!shrink) pagecache_isize_extended(inode, oldsize, inode->i_size); /* * Blocks are going to be removed from the inode. Wait * for dio in flight. Temporarily disable * dioread_nolock to prevent livelock. */ if (orphan) { if (!ext4_should_journal_data(inode)) { ext4_inode_block_unlocked_dio(inode); inode_dio_wait(inode); ext4_inode_resume_unlocked_dio(inode); } else ext4_wait_for_tail_page_commit(inode); } down_write(&EXT4_I(inode)->i_mmap_sem); /* * Truncate pagecache after we've waited for commit * in data=journal mode to make pages freeable. 
*/ truncate_pagecache(inode, inode->i_size); if (shrink) ext4_truncate(inode); up_write(&EXT4_I(inode)->i_mmap_sem); } if (!rc) { setattr_copy(inode, attr); mark_inode_dirty(inode); } /* * If the call to ext4_truncate failed to get a transaction handle at * all, we need to clean up the in-core orphan list manually. */ if (orphan && inode->i_nlink) ext4_orphan_del(NULL, inode); if (!rc && (ia_valid & ATTR_MODE)) rc = posix_acl_chmod(inode, inode->i_mode); err_out: ext4_std_error(inode->i_sb, error); if (!error) error = rc; return error; }
167,491
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int dns_parse_callback(void *c, int rr, const void *data, int len, const void *packet) { char tmp[256]; struct dpc_ctx *ctx = c; switch (rr) { case RR_A: if (len != 4) return -1; ctx->addrs[ctx->cnt].scopeid = 0; memcpy(ctx->addrs[ctx->cnt++].addr, data, 4); break; case RR_AAAA: if (len != 16) return -1; ctx->addrs[ctx->cnt].family = AF_INET6; ctx->addrs[ctx->cnt].scopeid = 0; memcpy(ctx->addrs[ctx->cnt++].addr, data, 16); break; case RR_CNAME: if (__dn_expand(packet, (const unsigned char *)packet + 512, data, tmp, sizeof tmp) > 0 && is_valid_hostname(tmp)) strcpy(ctx->canon, tmp); break; } return 0; } Commit Message: CWE ID: CWE-119
static int dns_parse_callback(void *c, int rr, const void *data, int len, const void *packet) { char tmp[256]; struct dpc_ctx *ctx = c; if (ctx->cnt >= MAXADDRS) return -1; switch (rr) { case RR_A: if (len != 4) return -1; ctx->addrs[ctx->cnt].scopeid = 0; memcpy(ctx->addrs[ctx->cnt++].addr, data, 4); break; case RR_AAAA: if (len != 16) return -1; ctx->addrs[ctx->cnt].family = AF_INET6; ctx->addrs[ctx->cnt].scopeid = 0; memcpy(ctx->addrs[ctx->cnt++].addr, data, 16); break; case RR_CNAME: if (__dn_expand(packet, (const unsigned char *)packet + 512, data, tmp, sizeof tmp) > 0 && is_valid_hostname(tmp)) strcpy(ctx->canon, tmp); break; } return 0; }
164,652
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: LONG ValidateSignature(HWND hDlg, const char* path) { LONG r; WINTRUST_DATA trust_data = { 0 }; WINTRUST_FILE_INFO trust_file = { 0 }; GUID guid_generic_verify = // WINTRUST_ACTION_GENERIC_VERIFY_V2 { 0xaac56b, 0xcd44, 0x11d0,{ 0x8c, 0xc2, 0x0, 0xc0, 0x4f, 0xc2, 0x95, 0xee } }; char *signature_name; size_t i, len; signature_name = GetSignatureName(path); if (signature_name == NULL) { uprintf("PKI: Could not get signature name"); MessageBoxExU(hDlg, lmprintf(MSG_284), lmprintf(MSG_283), MB_OK | MB_ICONERROR | MB_IS_RTL, selected_langid); return TRUST_E_NOSIGNATURE; } for (i = 0; i < ARRAYSIZE(cert_name); i++) { len = strlen(cert_name[i]); if (strncmp(signature_name, cert_name[i], len) == 0) { if ((len >= strlen(signature_name)) || isspace(signature_name[len])) break; } } if (i >= ARRAYSIZE(cert_name)) { uprintf("PKI: Signature '%s' is unexpected...", signature_name); if (MessageBoxExU(hDlg, lmprintf(MSG_285, signature_name), lmprintf(MSG_283), MB_YESNO | MB_ICONWARNING | MB_IS_RTL, selected_langid) != IDYES) return TRUST_E_EXPLICIT_DISTRUST; } trust_file.cbStruct = sizeof(trust_file); trust_file.pcwszFilePath = utf8_to_wchar(path); if (trust_file.pcwszFilePath == NULL) { uprintf("PKI: Unable to convert '%s' to UTF16", path); return ERROR_SEVERITY_ERROR | FAC(FACILITY_CERT) | ERROR_NOT_ENOUGH_MEMORY; } trust_data.cbStruct = sizeof(trust_data); trust_data.dwUIChoice = WTD_UI_ALL; trust_data.fdwRevocationChecks = WTD_REVOKE_WHOLECHAIN; trust_data.dwProvFlags = WTD_REVOCATION_CHECK_CHAIN | 0x400; trust_data.dwUnionChoice = WTD_CHOICE_FILE; trust_data.pFile = &trust_file; r = WinVerifyTrust(NULL, &guid_generic_verify, &trust_data); safe_free(trust_file.pcwszFilePath); return r; } Commit Message: [pki] fix https://www.kb.cert.org/vuls/id/403768 * This commit effectively fixes https://www.kb.cert.org/vuls/id/403768 (CVE-2017-13083) as it is described per its revision 11, which is the latest revision at the time of this commit, by disabling Windows prompts, enacted during signature validation, that allow the user to bypass the intended signature verification checks. * It needs to be pointed out that the vulnerability ("allow(ing) the use of a self-signed certificate"), which relies on the end-user actively ignoring a Windows prompt that tells them that the update failed the signature validation whilst also advising against running it, is being fully addressed, even as the update protocol remains HTTP. * It also need to be pointed out that the extended delay (48 hours) between the time the vulnerability was reported and the moment it is fixed in our codebase has to do with the fact that the reporter chose to deviate from standard security practices by not disclosing the details of the vulnerability with us, be it publicly or privately, before creating the cert.org report. The only advance notification we received was a generic note about the use of HTTP vs HTTPS, which, as have established, is not immediately relevant to addressing the reported vulnerability. * Closes #1009 * Note: The other vulnerability scenario described towards the end of #1009, which doesn't have to do with the "lack of CA checking", will be addressed separately. CWE ID: CWE-494
LONG ValidateSignature(HWND hDlg, const char* path) { LONG r; WINTRUST_DATA trust_data = { 0 }; WINTRUST_FILE_INFO trust_file = { 0 }; GUID guid_generic_verify = // WINTRUST_ACTION_GENERIC_VERIFY_V2 { 0xaac56b, 0xcd44, 0x11d0,{ 0x8c, 0xc2, 0x0, 0xc0, 0x4f, 0xc2, 0x95, 0xee } }; char *signature_name; size_t i, len; signature_name = GetSignatureName(path); if (signature_name == NULL) { uprintf("PKI: Could not get signature name"); MessageBoxExU(hDlg, lmprintf(MSG_284), lmprintf(MSG_283), MB_OK | MB_ICONERROR | MB_IS_RTL, selected_langid); return TRUST_E_NOSIGNATURE; } for (i = 0; i < ARRAYSIZE(cert_name); i++) { len = strlen(cert_name[i]); if (strncmp(signature_name, cert_name[i], len) == 0) { if ((len >= strlen(signature_name)) || isspace(signature_name[len])) break; } } if (i >= ARRAYSIZE(cert_name)) { uprintf("PKI: Signature '%s' is unexpected...", signature_name); if (MessageBoxExU(hDlg, lmprintf(MSG_285, signature_name), lmprintf(MSG_283), MB_YESNO | MB_ICONWARNING | MB_IS_RTL, selected_langid) != IDYES) return TRUST_E_EXPLICIT_DISTRUST; } trust_file.cbStruct = sizeof(trust_file); trust_file.pcwszFilePath = utf8_to_wchar(path); if (trust_file.pcwszFilePath == NULL) { uprintf("PKI: Unable to convert '%s' to UTF16", path); return ERROR_SEVERITY_ERROR | FAC(FACILITY_CERT) | ERROR_NOT_ENOUGH_MEMORY; } trust_data.cbStruct = sizeof(trust_data); // NB: WTD_UI_ALL can result in ERROR_SUCCESS even if the signature validation fails, // because it still prompts the user to run untrusted software, even after explicitly // notifying them that the signature invalid (and of course Microsoft had to make // that UI prompt a bit too similar to the other benign prompt you get when running // trusted software, which, as per cert.org's assessment, may confuse non-security // conscious-users who decide to gloss over these kind of notifications). trust_data.dwUIChoice = WTD_UI_NONE; trust_data.fdwRevocationChecks = WTD_REVOKE_WHOLECHAIN; trust_data.dwProvFlags = WTD_REVOCATION_CHECK_CHAIN | 0x400; trust_data.dwUnionChoice = WTD_CHOICE_FILE; trust_data.pFile = &trust_file; r = WinVerifyTrust(NULL, &guid_generic_verify, &trust_data); safe_free(trust_file.pcwszFilePath); switch (r) { case ERROR_SUCCESS: break; case TRUST_E_NOSIGNATURE: // Should already have been reported, but since we have a custom message for it... uprintf("PKI: File does not appear to be signed: %s", WinPKIErrorString()); MessageBoxExU(hDlg, lmprintf(MSG_284), lmprintf(MSG_283), MB_OK | MB_ICONERROR | MB_IS_RTL, selected_langid); break; default: uprintf("PKI: Failed to validate signature: %s", WinPKIErrorString()); MessageBoxExU(hDlg, lmprintf(MSG_240), lmprintf(MSG_283), MB_OK | MB_ICONERROR | MB_IS_RTL, selected_langid); break; } return r; }
167,815
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void BluetoothDeviceChromeOS::UnregisterAgent() { if (!agent_.get()) return; DCHECK(pairing_delegate_); DCHECK(pincode_callback_.is_null()); DCHECK(passkey_callback_.is_null()); DCHECK(confirmation_callback_.is_null()); pairing_delegate_->DismissDisplayOrConfirm(); pairing_delegate_ = NULL; agent_.reset(); VLOG(1) << object_path_.value() << ": Unregistering pairing agent"; DBusThreadManager::Get()->GetBluetoothAgentManagerClient()-> UnregisterAgent( dbus::ObjectPath(kAgentPath), base::Bind(&base::DoNothing), base::Bind(&BluetoothDeviceChromeOS::OnUnregisterAgentError, weak_ptr_factory_.GetWeakPtr())); } Commit Message: Refactor to support default Bluetooth pairing delegate In order to support a default pairing delegate we need to move the agent service provider delegate implementation from BluetoothDevice to BluetoothAdapter while retaining the existing API. BUG=338492 TEST=device_unittests, unit_tests, browser_tests Review URL: https://codereview.chromium.org/148293003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@252216 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
void BluetoothDeviceChromeOS::UnregisterAgent() {
171,241
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: const BlockEntry* Track::GetEOS() const { return &m_eos; } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
const BlockEntry* Track::GetEOS() const { return &m_eos; }
174,309
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int pfkey_recvmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct pfkey_sock *pfk = pfkey_sk(sk); struct sk_buff *skb; int copied, err; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) goto out; msg->msg_namelen = 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto out_free; sock_recv_ts_and_drops(msg, sk, skb); err = (flags & MSG_TRUNC) ? skb->len : copied; if (pfk->dump.dump != NULL && 3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) pfkey_do_dump(pfk); out_free: skb_free_datagram(sk, skb); out: return err; } Commit Message: net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-20
static int pfkey_recvmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct pfkey_sock *pfk = pfkey_sk(sk); struct sk_buff *skb; int copied, err; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT)) goto out; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto out_free; sock_recv_ts_and_drops(msg, sk, skb); err = (flags & MSG_TRUNC) ? skb->len : copied; if (pfk->dump.dump != NULL && 3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) pfkey_do_dump(pfk); out_free: skb_free_datagram(sk, skb); out: return err; }
166,504
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool LookupMatchInTopDomains(base::StringPiece skeleton) { DCHECK_NE(skeleton.back(), '.'); auto labels = base::SplitStringPiece(skeleton, ".", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL); if (labels.size() > kNumberOfLabelsToCheck) { labels.erase(labels.begin(), labels.begin() + labels.size() - kNumberOfLabelsToCheck); } while (labels.size() > 1) { std::string partial_skeleton = base::JoinString(labels, "."); if (net::LookupStringInFixedSet( g_graph, g_graph_length, partial_skeleton.data(), partial_skeleton.length()) != net::kDafsaNotFound) return true; labels.erase(labels.begin()); } return false; } Commit Message: Map U+04CF to lowercase L as well. U+04CF (ӏ) has the confusability skeleton of 'i' (lowercase I), but it can be confused for 'l' (lowercase L) or '1' (digit) if rendered in some fonts. If a host name contains it, calculate the confusability skeleton twice, once with the default mapping to 'i' (lowercase I) and the 2nd time with an alternative mapping to 'l'. Mapping them to 'l' (lowercase L) also gets it treated as similar to digit 1 because the confusability skeleton of digit 1 is 'l'. Bug: 817247 Test: components_unittests --gtest_filter=*IDN* Change-Id: I7442b950c9457eea285e17f01d1f43c9acc5d79c Reviewed-on: https://chromium-review.googlesource.com/974165 Commit-Queue: Jungshik Shin <[email protected]> Reviewed-by: Peter Kasting <[email protected]> Reviewed-by: Eric Lawrence <[email protected]> Cr-Commit-Position: refs/heads/master@{#551263} CWE ID:
bool LookupMatchInTopDomains(const icu::UnicodeString& ustr_skeleton) { std::string skeleton; ustr_skeleton.toUTF8String(skeleton); DCHECK_NE(skeleton.back(), '.'); auto labels = base::SplitStringPiece(skeleton, ".", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL); if (labels.size() > kNumberOfLabelsToCheck) { labels.erase(labels.begin(), labels.begin() + labels.size() - kNumberOfLabelsToCheck); } while (labels.size() > 1) { std::string partial_skeleton = base::JoinString(labels, "."); if (net::LookupStringInFixedSet( g_graph, g_graph_length, partial_skeleton.data(), partial_skeleton.length()) != net::kDafsaNotFound) return true; labels.erase(labels.begin()); } return false; }
173,223
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: HTMLFrameOwnerElement::HTMLFrameOwnerElement(const QualifiedName& tag_name, Document& document) : HTMLElement(tag_name, document), content_frame_(nullptr), embedded_content_view_(nullptr), sandbox_flags_(kSandboxNone) {} Commit Message: Resource Timing: Do not report subsequent navigations within subframes We only want to record resource timing for the load that was initiated by parent document. We filter out subsequent navigations for <iframe>, but we should do it for other types of subframes too. Bug: 780312 Change-Id: I3a7b9e1a365c99e24bb8dac190e88c7099fc3da5 Reviewed-on: https://chromium-review.googlesource.com/750487 Reviewed-by: Nate Chapin <[email protected]> Commit-Queue: Kunihiko Sakamoto <[email protected]> Cr-Commit-Position: refs/heads/master@{#513665} CWE ID: CWE-601
HTMLFrameOwnerElement::HTMLFrameOwnerElement(const QualifiedName& tag_name, Document& document) : HTMLElement(tag_name, document), content_frame_(nullptr), embedded_content_view_(nullptr),
172,928
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ShellWindow::ShellWindow(Profile* profile, const extensions::Extension* extension, const GURL& url) : profile_(profile), extension_(extension), ALLOW_THIS_IN_INITIALIZER_LIST( extension_function_dispatcher_(profile, this)) { web_contents_ = WebContents::Create( profile, SiteInstance::CreateForURL(profile, url), MSG_ROUTING_NONE, NULL, NULL); contents_.reset(new TabContents(web_contents_)); content::WebContentsObserver::Observe(web_contents_); web_contents_->SetDelegate(this); chrome::SetViewType(web_contents_, chrome::VIEW_TYPE_APP_SHELL); web_contents_->GetMutableRendererPrefs()-> browser_handles_all_top_level_requests = true; web_contents_->GetRenderViewHost()->SyncRendererPrefs(); web_contents_->GetController().LoadURL( url, content::Referrer(), content::PAGE_TRANSITION_LINK, std::string()); registrar_.Add(this, chrome::NOTIFICATION_EXTENSION_UNLOADED, content::Source<Profile>(profile_)); registrar_.Add(this, content::NOTIFICATION_APP_TERMINATING, content::NotificationService::AllSources()); TabContents* tab_contents = TabContents::FromWebContents(web_contents_); InfoBarTabHelper* infobar_helper = tab_contents->infobar_tab_helper(); infobar_helper->set_infobars_enabled(false); browser::StartKeepAlive(); } Commit Message: Make chrome.appWindow.create() provide access to the child window at a predictable time. When you first create a window with chrome.appWindow.create(), it won't have loaded any resources. So, at create time, you are guaranteed that: child_window.location.href == 'about:blank' child_window.document.documentElement.outerHTML == '<html><head></head><body></body></html>' This is in line with the behaviour of window.open(). BUG=131735 TEST=browser_tests:PlatformAppBrowserTest.WindowsApi Committed: http://src.chromium.org/viewvc/chrome?view=rev&revision=144072 Review URL: https://chromiumcodereview.appspot.com/10644006 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@144356 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
ShellWindow::ShellWindow(Profile* profile, const extensions::Extension* extension, const GURL& url) : profile_(profile), extension_(extension), ALLOW_THIS_IN_INITIALIZER_LIST( extension_function_dispatcher_(profile, this)) { web_contents_ = WebContents::Create( profile, SiteInstance::CreateForURL(profile, url), MSG_ROUTING_NONE, NULL, NULL); contents_.reset(new TabContents(web_contents_)); content::WebContentsObserver::Observe(web_contents_); web_contents_->SetDelegate(this); chrome::SetViewType(web_contents_, chrome::VIEW_TYPE_APP_SHELL); web_contents_->GetMutableRendererPrefs()-> browser_handles_all_top_level_requests = true; web_contents_->GetRenderViewHost()->SyncRendererPrefs(); // Block the created RVH from loading anything until the background page // has had a chance to do any initialization it wants. SuspendRenderViewHost(web_contents_->GetRenderViewHost()); // TODO(jeremya): there's a bug where navigating a web contents to an // extension URL causes it to create a new RVH and discard the old (perfectly // usable) one. To work around this, we watch for a RVH_CHANGED message from // the web contents (which will be sent during LoadURL) and suspend resource // requests on the new RVH to ensure that we block the new RVH from loading // anything. It should be okay to remove the NOTIFICATION_RVH_CHANGED // registration once http://crbug.com/123007 is fixed. registrar_.Add(this, content::NOTIFICATION_RENDER_VIEW_HOST_CHANGED, content::Source<content::NavigationController>( &web_contents_->GetController())); web_contents_->GetController().LoadURL( url, content::Referrer(), content::PAGE_TRANSITION_LINK, std::string()); registrar_.RemoveAll(); registrar_.Add(this, chrome::NOTIFICATION_EXTENSION_UNLOADED, content::Source<Profile>(profile_)); registrar_.Add(this, content::NOTIFICATION_APP_TERMINATING, content::NotificationService::AllSources()); TabContents* tab_contents = TabContents::FromWebContents(web_contents_); InfoBarTabHelper* infobar_helper = tab_contents->infobar_tab_helper(); infobar_helper->set_infobars_enabled(false); browser::StartKeepAlive(); }
170,814
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool mkvparser::Match( IMkvReader* pReader, long long& pos, unsigned long id_, unsigned char*& buf, size_t& buflen) { assert(pReader); assert(pos >= 0); long long total, available; long status = pReader->Length(&total, &available); assert(status >= 0); assert((total < 0) || (available <= total)); if (status < 0) return false; long len; const long long id = ReadUInt(pReader, pos, len); assert(id >= 0); assert(len > 0); assert(len <= 8); assert((pos + len) <= available); if ((unsigned long)id != id_) return false; pos += len; //consume id const long long size_ = ReadUInt(pReader, pos, len); assert(size_ >= 0); assert(len > 0); assert(len <= 8); assert((pos + len) <= available); pos += len; //consume length of size of payload assert((pos + size_) <= available); const long buflen_ = static_cast<long>(size_); buf = new (std::nothrow) unsigned char[buflen_]; assert(buf); //TODO status = pReader->Read(pos, buflen_, buf); assert(status == 0); //TODO buflen = buflen_; pos += size_; //consume size of payload return true; } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
bool mkvparser::Match(IMkvReader* pReader, long long& pos, unsigned long id_, unsigned char*& buf, size_t& buflen) { assert(pReader); assert(pos >= 0); long long total, available; long status = pReader->Length(&total, &available); assert(status >= 0); assert((total < 0) || (available <= total)); if (status < 0) return false; long len; const long long id = ReadUInt(pReader, pos, len); assert(id >= 0); assert(len > 0); assert(len <= 8); assert((pos + len) <= available); if ((unsigned long)id != id_) return false; pos += len; // consume id const long long size_ = ReadUInt(pReader, pos, len); assert(size_ >= 0); assert(len > 0); assert(len <= 8); assert((pos + len) <= available); pos += len; // consume length of size of payload assert((pos + size_) <= available); const long buflen_ = static_cast<long>(size_); buf = new (std::nothrow) unsigned char[buflen_]; assert(buf); // TODO status = pReader->Read(pos, buflen_, buf); assert(status == 0); // TODO buflen = buflen_; pos += size_; // consume size of payload return true; }
174,399
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) { #if WRITE_COMPRESSED_STREAM ++out_frames_; if (pkt->data.frame.pts == 0) write_ivf_file_header(&cfg_, 0, outfile_); write_ivf_frame_header(pkt, outfile_); (void)fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile_); #endif } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
#if WRITE_COMPRESSED_STREAM virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) { ++out_frames_; if (pkt->data.frame.pts == 0) write_ivf_file_header(&cfg_, 0, outfile_); write_ivf_frame_header(pkt, outfile_); (void)fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile_); }
174,568
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void MockWebRTCPeerConnectionHandler::createOffer(const WebRTCSessionDescriptionRequest& request, const WebMediaConstraints& constraints) { WebString shouldSucceed; if (constraints.getMandatoryConstraintValue("succeed", shouldSucceed) && shouldSucceed == "true") { WebRTCSessionDescriptionDescriptor sessionDescription; sessionDescription.initialize("offer", "Some SDP here"); postTask(new RTCSessionDescriptionRequestSuccededTask(this, request, sessionDescription)); } else postTask(new RTCSessionDescriptionRequestFailedTask(this, request)); } Commit Message: Unreviewed, rolling out r127612, r127660, and r127664. http://trac.webkit.org/changeset/127612 http://trac.webkit.org/changeset/127660 http://trac.webkit.org/changeset/127664 https://bugs.webkit.org/show_bug.cgi?id=95920 Source/Platform: * Platform.gypi: * chromium/public/WebRTCPeerConnectionHandler.h: (WebKit): (WebRTCPeerConnectionHandler): * chromium/public/WebRTCVoidRequest.h: Removed. Source/WebCore: * CMakeLists.txt: * GNUmakefile.list.am: * Modules/mediastream/RTCErrorCallback.h: (WebCore): (RTCErrorCallback): * Modules/mediastream/RTCErrorCallback.idl: * Modules/mediastream/RTCPeerConnection.cpp: (WebCore::RTCPeerConnection::createOffer): * Modules/mediastream/RTCPeerConnection.h: (WebCore): (RTCPeerConnection): * Modules/mediastream/RTCPeerConnection.idl: * Modules/mediastream/RTCSessionDescriptionCallback.h: (WebCore): (RTCSessionDescriptionCallback): * Modules/mediastream/RTCSessionDescriptionCallback.idl: * Modules/mediastream/RTCSessionDescriptionRequestImpl.cpp: (WebCore::RTCSessionDescriptionRequestImpl::create): (WebCore::RTCSessionDescriptionRequestImpl::RTCSessionDescriptionRequestImpl): (WebCore::RTCSessionDescriptionRequestImpl::requestSucceeded): (WebCore::RTCSessionDescriptionRequestImpl::requestFailed): (WebCore::RTCSessionDescriptionRequestImpl::clear): * Modules/mediastream/RTCSessionDescriptionRequestImpl.h: (RTCSessionDescriptionRequestImpl): * Modules/mediastream/RTCVoidRequestImpl.cpp: Removed. * Modules/mediastream/RTCVoidRequestImpl.h: Removed. * WebCore.gypi: * platform/chromium/support/WebRTCVoidRequest.cpp: Removed. * platform/mediastream/RTCPeerConnectionHandler.cpp: (RTCPeerConnectionHandlerDummy): (WebCore::RTCPeerConnectionHandlerDummy::RTCPeerConnectionHandlerDummy): * platform/mediastream/RTCPeerConnectionHandler.h: (WebCore): (WebCore::RTCPeerConnectionHandler::~RTCPeerConnectionHandler): (RTCPeerConnectionHandler): (WebCore::RTCPeerConnectionHandler::RTCPeerConnectionHandler): * platform/mediastream/RTCVoidRequest.h: Removed. * platform/mediastream/chromium/RTCPeerConnectionHandlerChromium.cpp: * platform/mediastream/chromium/RTCPeerConnectionHandlerChromium.h: (RTCPeerConnectionHandlerChromium): Tools: * DumpRenderTree/chromium/MockWebRTCPeerConnectionHandler.cpp: (MockWebRTCPeerConnectionHandler::SuccessCallbackTask::SuccessCallbackTask): (MockWebRTCPeerConnectionHandler::SuccessCallbackTask::runIfValid): (MockWebRTCPeerConnectionHandler::FailureCallbackTask::FailureCallbackTask): (MockWebRTCPeerConnectionHandler::FailureCallbackTask::runIfValid): (MockWebRTCPeerConnectionHandler::createOffer): * DumpRenderTree/chromium/MockWebRTCPeerConnectionHandler.h: (MockWebRTCPeerConnectionHandler): (SuccessCallbackTask): (FailureCallbackTask): LayoutTests: * fast/mediastream/RTCPeerConnection-createOffer.html: * fast/mediastream/RTCPeerConnection-localDescription-expected.txt: Removed. * fast/mediastream/RTCPeerConnection-localDescription.html: Removed. 
* fast/mediastream/RTCPeerConnection-remoteDescription-expected.txt: Removed. * fast/mediastream/RTCPeerConnection-remoteDescription.html: Removed. git-svn-id: svn://svn.chromium.org/blink/trunk@127679 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID: CWE-20
void MockWebRTCPeerConnectionHandler::createOffer(const WebRTCSessionDescriptionRequest& request, const WebMediaConstraints& constraints) { WebString shouldSucceed; if (constraints.getMandatoryConstraintValue("succeed", shouldSucceed) && shouldSucceed == "true") { WebRTCSessionDescriptionDescriptor sessionDescription; sessionDescription.initialize("offer", "Some SDP here");
170,359
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: inline void update_rq_clock(struct rq *rq) { if (!rq->skip_clock_update) { int cpu = cpu_of(rq); u64 irq_time; rq->clock = sched_clock_cpu(cpu); irq_time = irq_time_cpu(cpu); if (rq->clock - irq_time > rq->clock_task) rq->clock_task = rq->clock - irq_time; sched_irq_time_avg_update(rq, irq_time); } } Commit Message: Sched: fix skip_clock_update optimization idle_balance() drops/retakes rq->lock, leaving the previous task vulnerable to set_tsk_need_resched(). Clear it after we return from balancing instead, and in setup_thread_stack() as well, so no successfully descheduled or never scheduled task has it set. Need resched confused the skip_clock_update logic, which assumes that the next call to update_rq_clock() will come nearly immediately after being set. Make the optimization robust against the waking a sleeper before it sucessfully deschedules case by checking that the current task has not been dequeued before setting the flag, since it is that useless clock update we're trying to save, and clear unconditionally in schedule() proper instead of conditionally in put_prev_task(). Signed-off-by: Mike Galbraith <[email protected]> Reported-by: Bjoern B. Brandenburg <[email protected]> Tested-by: Yong Zhang <[email protected]> Signed-off-by: Peter Zijlstra <[email protected]> Cc: [email protected] LKML-Reference: <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> CWE ID:
inline void update_rq_clock(struct rq *rq) { int cpu = cpu_of(rq); u64 irq_time; if (rq->skip_clock_update) return; rq->clock = sched_clock_cpu(cpu); irq_time = irq_time_cpu(cpu); if (rq->clock - irq_time > rq->clock_task) rq->clock_task = rq->clock - irq_time; sched_irq_time_avg_update(rq, irq_time); }
165,678
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: GDataFileError GDataFileSystem::AddNewDirectory( const FilePath& directory_path, base::Value* entry_value) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); if (!entry_value) return GDATA_FILE_ERROR_FAILED; scoped_ptr<DocumentEntry> doc_entry(DocumentEntry::CreateFrom(*entry_value)); if (!doc_entry.get()) return GDATA_FILE_ERROR_FAILED; GDataEntry* entry = directory_service_->FindEntryByPathSync(directory_path); if (!entry) return GDATA_FILE_ERROR_FAILED; GDataDirectory* parent_dir = entry->AsGDataDirectory(); if (!parent_dir) return GDATA_FILE_ERROR_FAILED; GDataEntry* new_entry = GDataEntry::FromDocumentEntry( NULL, doc_entry.get(), directory_service_.get()); if (!new_entry) return GDATA_FILE_ERROR_FAILED; parent_dir->AddEntry(new_entry); OnDirectoryChanged(directory_path); return GDATA_FILE_OK; } Commit Message: Remove parent* arg from GDataEntry ctor. * Remove static FromDocumentEntry from GDataEntry, GDataFile, GDataDirectory. Replace with InitFromDocumentEntry. * Move common code from GDataFile::InitFromDocumentEntry and GDataDirectory::InitFromDocumentEntry to GDataEntry::InitFromDocumentEntry. * Add GDataDirectoryService::FromDocumentEntry and use this everywhere. * Make ctors of GDataFile, GDataDirectory private, so these must be created by GDataDirectoryService's CreateGDataFile and CreateGDataDirectory. Make GDataEntry ctor protected. BUG=141494 TEST=unit tests. Review URL: https://chromiumcodereview.appspot.com/10854083 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@151008 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
GDataFileError GDataFileSystem::AddNewDirectory( const FilePath& directory_path, base::Value* entry_value) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); if (!entry_value) return GDATA_FILE_ERROR_FAILED; scoped_ptr<DocumentEntry> doc_entry(DocumentEntry::CreateFrom(*entry_value)); if (!doc_entry.get()) return GDATA_FILE_ERROR_FAILED; GDataEntry* entry = directory_service_->FindEntryByPathSync(directory_path); if (!entry) return GDATA_FILE_ERROR_FAILED; GDataDirectory* parent_dir = entry->AsGDataDirectory(); if (!parent_dir) return GDATA_FILE_ERROR_FAILED; GDataEntry* new_entry = directory_service_->FromDocumentEntry(doc_entry.get()); if (!new_entry) return GDATA_FILE_ERROR_FAILED; parent_dir->AddEntry(new_entry); OnDirectoryChanged(directory_path); return GDATA_FILE_OK; }
171,479
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: TemplateURLRef::SearchTermsArgs::ContextualSearchParams::ContextualSearchParams( int version, size_t start, size_t end, const std::string& selection, const std::string& content, const std::string& base_page_url, const std::string& encoding, int now_on_tap_version) : version(version), start(start), end(end), selection(selection), content(content), base_page_url(base_page_url), encoding(encoding), now_on_tap_version(now_on_tap_version) {} Commit Message: [Contextual Search] Change "Now on Tap" to "Contextual Cards" BUG=644934 Review-Url: https://codereview.chromium.org/2361163003 Cr-Commit-Position: refs/heads/master@{#420899} CWE ID:
TemplateURLRef::SearchTermsArgs::ContextualSearchParams::ContextualSearchParams( int version, size_t start, size_t end, const std::string& selection, const std::string& content, const std::string& base_page_url, const std::string& encoding, int contextual_cards_version) : version(version), start(start), end(end), selection(selection), content(content), base_page_url(base_page_url), encoding(encoding), contextual_cards_version(contextual_cards_version) {}
171,647
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: TracingControllerImpl::TracingControllerImpl() : delegate_(GetContentClient()->browser()->GetTracingDelegate()), weak_ptr_factory_(this) { DCHECK(!g_tracing_controller); DCHECK_CURRENTLY_ON(BrowserThread::UI); base::FileTracing::SetProvider(new FileTracingProviderImpl); AddAgents(); base::trace_event::TraceLog::GetInstance()->AddAsyncEnabledStateObserver( weak_ptr_factory_.GetWeakPtr()); g_tracing_controller = this; } Commit Message: Tracing: Connect to service on startup Temporary workaround for flaky tests introduced by https://chromium-review.googlesource.com/c/chromium/src/+/1439082 [email protected] Bug: 928410, 928363 Change-Id: I0dcf20cbdf91a7beea167a220ba9ef7e0604c1ab Reviewed-on: https://chromium-review.googlesource.com/c/1452767 Reviewed-by: oysteine <[email protected]> Reviewed-by: Eric Seckler <[email protected]> Reviewed-by: Aaron Gable <[email protected]> Commit-Queue: oysteine <[email protected]> Cr-Commit-Position: refs/heads/master@{#631052} CWE ID: CWE-19
TracingControllerImpl::TracingControllerImpl() : delegate_(GetContentClient()->browser()->GetTracingDelegate()), weak_ptr_factory_(this) { DCHECK(!g_tracing_controller); DCHECK_CURRENTLY_ON(BrowserThread::UI); base::FileTracing::SetProvider(new FileTracingProviderImpl); AddAgents(); base::trace_event::TraceLog::GetInstance()->AddAsyncEnabledStateObserver( weak_ptr_factory_.GetWeakPtr()); g_tracing_controller = this; // TODO(oysteine): Instead of connecting right away, we should connect // in StartTracing once this no longer causes test flakiness. ConnectToServiceIfNeeded(); }
172,057
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void cJSON_DeleteItemFromArray( cJSON *array, int which ) { cJSON_Delete( cJSON_DetachItemFromArray( array, which ) ); } Commit Message: Fix a buffer overflow / heap corruption issue that could occur if a malformed JSON string was passed on the control channel. This issue, present in the cJSON library, was already fixed upstream, so was addressed here in iperf3 by importing a newer version of cJSON (plus local ESnet modifications). Discovered and reported by Dave McDaniel, Cisco Talos. Based on a patch by @dopheide-esnet, with input from @DaveGamble. Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001, CVE-2016-4303 (cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40) Signed-off-by: Bruce A. Mah <[email protected]> CWE ID: CWE-119
void cJSON_DeleteItemFromArray( cJSON *array, int which ) { cJSON_Delete( cJSON_DetachItemFromArray( array, which ) ); }
167,282
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void svc_rdma_get_write_arrays(struct rpcrdma_msg *rmsgp, struct rpcrdma_write_array **write, struct rpcrdma_write_array **reply) { __be32 *p; p = (__be32 *)&rmsgp->rm_body.rm_chunks[0]; /* Read list */ while (*p++ != xdr_zero) p += 5; /* Write list */ if (*p != xdr_zero) { *write = (struct rpcrdma_write_array *)p; while (*p++ != xdr_zero) p += 1 + be32_to_cpu(*p) * 4; } else { *write = NULL; p++; } /* Reply chunk */ if (*p != xdr_zero) *reply = (struct rpcrdma_write_array *)p; else *reply = NULL; } Commit Message: Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux Pull nfsd updates from Bruce Fields: "Another RDMA update from Chuck Lever, and a bunch of miscellaneous bugfixes" * tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits) nfsd: Fix up the "supattr_exclcreat" attributes nfsd: encoders mustn't use unitialized values in error cases nfsd: fix undefined behavior in nfsd4_layout_verify lockd: fix lockd shutdown race NFSv4: Fix callback server shutdown SUNRPC: Refactor svc_set_num_threads() NFSv4.x/callback: Create the callback service through svc_create_pooled lockd: remove redundant check on block svcrdma: Clean out old XDR encoders svcrdma: Remove the req_map cache svcrdma: Remove unused RDMA Write completion handler svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt svcrdma: Clean up RPC-over-RDMA backchannel reply processing svcrdma: Report Write/Reply chunk overruns svcrdma: Clean up RDMA_ERROR path svcrdma: Use rdma_rw API in RPC reply path svcrdma: Introduce local rdma_rw API helpers svcrdma: Clean up svc_rdma_get_inv_rkey() svcrdma: Add helper to save pages under I/O svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT ... CWE ID: CWE-404
static void svc_rdma_get_write_arrays(__be32 *rdma_argp, __be32 **write, __be32 **reply) { __be32 *p; p = rdma_argp + rpcrdma_fixed_maxsz; /* Read list */ while (*p++ != xdr_zero) p += 5; /* Write list */ if (*p != xdr_zero) { *write = p; while (*p++ != xdr_zero) p += 1 + be32_to_cpu(*p) * 4; } else { *write = NULL; p++; } /* Reply chunk */ if (*p != xdr_zero) *reply = p; else *reply = NULL; }
168,172
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void __udf_read_inode(struct inode *inode) { struct buffer_head *bh = NULL; struct fileEntry *fe; struct extendedFileEntry *efe; uint16_t ident; struct udf_inode_info *iinfo = UDF_I(inode); struct udf_sb_info *sbi = UDF_SB(inode->i_sb); unsigned int link_count; /* * Set defaults, but the inode is still incomplete! * Note: get_new_inode() sets the following on a new inode: * i_sb = sb * i_no = ino * i_flags = sb->s_flags * i_state = 0 * clean_inode(): zero fills and sets * i_count = 1 * i_nlink = 1 * i_op = NULL; */ bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident); if (!bh) { udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino); make_bad_inode(inode); return; } if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE && ident != TAG_IDENT_USE) { udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n", inode->i_ino, ident); brelse(bh); make_bad_inode(inode); return; } fe = (struct fileEntry *)bh->b_data; efe = (struct extendedFileEntry *)bh->b_data; if (fe->icbTag.strategyType == cpu_to_le16(4096)) { struct buffer_head *ibh; ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1, &ident); if (ident == TAG_IDENT_IE && ibh) { struct buffer_head *nbh = NULL; struct kernel_lb_addr loc; struct indirectEntry *ie; ie = (struct indirectEntry *)ibh->b_data; loc = lelb_to_cpu(ie->indirectICB.extLocation); if (ie->indirectICB.extLength && (nbh = udf_read_ptagged(inode->i_sb, &loc, 0, &ident))) { if (ident == TAG_IDENT_FE || ident == TAG_IDENT_EFE) { memcpy(&iinfo->i_location, &loc, sizeof(struct kernel_lb_addr)); brelse(bh); brelse(ibh); brelse(nbh); __udf_read_inode(inode); return; } brelse(nbh); } } brelse(ibh); } else if (fe->icbTag.strategyType != cpu_to_le16(4)) { udf_err(inode->i_sb, "unsupported strategy type: %d\n", le16_to_cpu(fe->icbTag.strategyType)); brelse(bh); make_bad_inode(inode); return; } if (fe->icbTag.strategyType == cpu_to_le16(4)) iinfo->i_strat4096 = 0; else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */ iinfo->i_strat4096 = 1; iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK; iinfo->i_unique = 0; iinfo->i_lenEAttr = 0; iinfo->i_lenExtents = 0; iinfo->i_lenAlloc = 0; iinfo->i_next_alloc_block = 0; iinfo->i_next_alloc_goal = 0; if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) { iinfo->i_efe = 1; iinfo->i_use = 0; if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) { iinfo->i_efe = 0; iinfo->i_use = 0; if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) { iinfo->i_efe = 0; iinfo->i_use = 1; iinfo->i_lenAlloc = le32_to_cpu( ((struct unallocSpaceEntry *)bh->b_data)-> lengthAllocDescs); if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); return; } read_lock(&sbi->s_cred_lock); i_uid_write(inode, le32_to_cpu(fe->uid)); if (!uid_valid(inode->i_uid) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) 
|| UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET)) inode->i_uid = UDF_SB(inode->i_sb)->s_uid; i_gid_write(inode, le32_to_cpu(fe->gid)); if (!gid_valid(inode->i_gid) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET)) inode->i_gid = UDF_SB(inode->i_sb)->s_gid; if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY && sbi->s_fmode != UDF_INVALID_MODE) inode->i_mode = sbi->s_fmode; else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY && sbi->s_dmode != UDF_INVALID_MODE) inode->i_mode = sbi->s_dmode; else inode->i_mode = udf_convert_permissions(fe); inode->i_mode &= ~sbi->s_umask; read_unlock(&sbi->s_cred_lock); link_count = le16_to_cpu(fe->fileLinkCount); if (!link_count) link_count = 1; set_nlink(inode, link_count); inode->i_size = le64_to_cpu(fe->informationLength); iinfo->i_lenExtents = inode->i_size; if (iinfo->i_efe == 0) { inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) << (inode->i_sb->s_blocksize_bits - 9); if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime)) inode->i_atime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime)) inode->i_mtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime)) inode->i_ctime = sbi->s_record_time; iinfo->i_unique = le64_to_cpu(fe->uniqueID); iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs); iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint); } else { inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) << (inode->i_sb->s_blocksize_bits - 9); if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime)) inode->i_atime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime)) inode->i_mtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime)) iinfo->i_crtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime)) inode->i_ctime = sbi->s_record_time; iinfo->i_unique = le64_to_cpu(efe->uniqueID); iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs); iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint); } switch (fe->icbTag.fileType) { case ICBTAG_FILE_TYPE_DIRECTORY: inode->i_op = &udf_dir_inode_operations; inode->i_fop = &udf_dir_operations; inode->i_mode |= S_IFDIR; inc_nlink(inode); break; case ICBTAG_FILE_TYPE_REALTIME: case ICBTAG_FILE_TYPE_REGULAR: case ICBTAG_FILE_TYPE_UNDEF: case ICBTAG_FILE_TYPE_VAT20: if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) inode->i_data.a_ops = &udf_adinicb_aops; else inode->i_data.a_ops = &udf_aops; inode->i_op = &udf_file_inode_operations; inode->i_fop = &udf_file_operations; inode->i_mode |= S_IFREG; break; case ICBTAG_FILE_TYPE_BLOCK: inode->i_mode |= S_IFBLK; break; case ICBTAG_FILE_TYPE_CHAR: inode->i_mode |= S_IFCHR; break; case ICBTAG_FILE_TYPE_FIFO: init_special_inode(inode, inode->i_mode | S_IFIFO, 0); break; case ICBTAG_FILE_TYPE_SOCKET: init_special_inode(inode, inode->i_mode | S_IFSOCK, 0); break; case ICBTAG_FILE_TYPE_SYMLINK: inode->i_data.a_ops = &udf_symlink_aops; inode->i_op = &udf_symlink_inode_operations; inode->i_mode = S_IFLNK | S_IRWXUGO; break; case ICBTAG_FILE_TYPE_MAIN: udf_debug("METADATA FILE-----\n"); break; case ICBTAG_FILE_TYPE_MIRROR: udf_debug("METADATA MIRROR FILE-----\n"); break; case ICBTAG_FILE_TYPE_BITMAP: udf_debug("METADATA BITMAP FILE-----\n"); break; default: udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n", 
inode->i_ino, fe->icbTag.fileType); make_bad_inode(inode); return; } if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1); if (dsea) { init_special_inode(inode, inode->i_mode, MKDEV(le32_to_cpu(dsea->majorDeviceIdent), le32_to_cpu(dsea->minorDeviceIdent))); /* Developer ID ??? */ } else make_bad_inode(inode); } brelse(bh); } Commit Message: udf: Avoid infinite loop when processing indirect ICBs We did not implement any bound on number of indirect ICBs we follow when loading inode. Thus corrupted medium could cause kernel to go into an infinite loop, possibly causing a stack overflow. Fix the possible stack overflow by removing recursion from __udf_read_inode() and limit number of indirect ICBs we follow to avoid infinite loops. Signed-off-by: Jan Kara <[email protected]> CWE ID: CWE-399
static void __udf_read_inode(struct inode *inode) { struct buffer_head *bh = NULL; struct fileEntry *fe; struct extendedFileEntry *efe; uint16_t ident; struct udf_inode_info *iinfo = UDF_I(inode); struct udf_sb_info *sbi = UDF_SB(inode->i_sb); unsigned int link_count; unsigned int indirections = 0; reread: /* * Set defaults, but the inode is still incomplete! * Note: get_new_inode() sets the following on a new inode: * i_sb = sb * i_no = ino * i_flags = sb->s_flags * i_state = 0 * clean_inode(): zero fills and sets * i_count = 1 * i_nlink = 1 * i_op = NULL; */ bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident); if (!bh) { udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino); make_bad_inode(inode); return; } if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE && ident != TAG_IDENT_USE) { udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n", inode->i_ino, ident); brelse(bh); make_bad_inode(inode); return; } fe = (struct fileEntry *)bh->b_data; efe = (struct extendedFileEntry *)bh->b_data; if (fe->icbTag.strategyType == cpu_to_le16(4096)) { struct buffer_head *ibh; ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1, &ident); if (ident == TAG_IDENT_IE && ibh) { struct kernel_lb_addr loc; struct indirectEntry *ie; ie = (struct indirectEntry *)ibh->b_data; loc = lelb_to_cpu(ie->indirectICB.extLocation); if (ie->indirectICB.extLength) { brelse(bh); brelse(ibh); memcpy(&iinfo->i_location, &loc, sizeof(struct kernel_lb_addr)); if (++indirections > UDF_MAX_ICB_NESTING) { udf_err(inode->i_sb, "too many ICBs in ICB hierarchy" " (max %d supported)\n", UDF_MAX_ICB_NESTING); make_bad_inode(inode); return; } goto reread; } } brelse(ibh); } else if (fe->icbTag.strategyType != cpu_to_le16(4)) { udf_err(inode->i_sb, "unsupported strategy type: %d\n", le16_to_cpu(fe->icbTag.strategyType)); brelse(bh); make_bad_inode(inode); return; } if (fe->icbTag.strategyType == cpu_to_le16(4)) iinfo->i_strat4096 = 0; else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */ iinfo->i_strat4096 = 1; iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK; iinfo->i_unique = 0; iinfo->i_lenEAttr = 0; iinfo->i_lenExtents = 0; iinfo->i_lenAlloc = 0; iinfo->i_next_alloc_block = 0; iinfo->i_next_alloc_goal = 0; if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) { iinfo->i_efe = 1; iinfo->i_use = 0; if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) { iinfo->i_efe = 0; iinfo->i_use = 0; if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry)); } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) { iinfo->i_efe = 0; iinfo->i_use = 1; iinfo->i_lenAlloc = le32_to_cpu( ((struct unallocSpaceEntry *)bh->b_data)-> lengthAllocDescs); if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry))) { make_bad_inode(inode); return; } memcpy(iinfo->i_ext.i_data, bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); return; } read_lock(&sbi->s_cred_lock); i_uid_write(inode, le32_to_cpu(fe->uid)); if (!uid_valid(inode->i_uid) || UDF_QUERY_FLAG(inode->i_sb, 
UDF_FLAG_UID_IGNORE) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET)) inode->i_uid = UDF_SB(inode->i_sb)->s_uid; i_gid_write(inode, le32_to_cpu(fe->gid)); if (!gid_valid(inode->i_gid) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET)) inode->i_gid = UDF_SB(inode->i_sb)->s_gid; if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY && sbi->s_fmode != UDF_INVALID_MODE) inode->i_mode = sbi->s_fmode; else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY && sbi->s_dmode != UDF_INVALID_MODE) inode->i_mode = sbi->s_dmode; else inode->i_mode = udf_convert_permissions(fe); inode->i_mode &= ~sbi->s_umask; read_unlock(&sbi->s_cred_lock); link_count = le16_to_cpu(fe->fileLinkCount); if (!link_count) link_count = 1; set_nlink(inode, link_count); inode->i_size = le64_to_cpu(fe->informationLength); iinfo->i_lenExtents = inode->i_size; if (iinfo->i_efe == 0) { inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) << (inode->i_sb->s_blocksize_bits - 9); if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime)) inode->i_atime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime)) inode->i_mtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime)) inode->i_ctime = sbi->s_record_time; iinfo->i_unique = le64_to_cpu(fe->uniqueID); iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs); iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint); } else { inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) << (inode->i_sb->s_blocksize_bits - 9); if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime)) inode->i_atime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime)) inode->i_mtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime)) iinfo->i_crtime = sbi->s_record_time; if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime)) inode->i_ctime = sbi->s_record_time; iinfo->i_unique = le64_to_cpu(efe->uniqueID); iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr); iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs); iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint); } switch (fe->icbTag.fileType) { case ICBTAG_FILE_TYPE_DIRECTORY: inode->i_op = &udf_dir_inode_operations; inode->i_fop = &udf_dir_operations; inode->i_mode |= S_IFDIR; inc_nlink(inode); break; case ICBTAG_FILE_TYPE_REALTIME: case ICBTAG_FILE_TYPE_REGULAR: case ICBTAG_FILE_TYPE_UNDEF: case ICBTAG_FILE_TYPE_VAT20: if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) inode->i_data.a_ops = &udf_adinicb_aops; else inode->i_data.a_ops = &udf_aops; inode->i_op = &udf_file_inode_operations; inode->i_fop = &udf_file_operations; inode->i_mode |= S_IFREG; break; case ICBTAG_FILE_TYPE_BLOCK: inode->i_mode |= S_IFBLK; break; case ICBTAG_FILE_TYPE_CHAR: inode->i_mode |= S_IFCHR; break; case ICBTAG_FILE_TYPE_FIFO: init_special_inode(inode, inode->i_mode | S_IFIFO, 0); break; case ICBTAG_FILE_TYPE_SOCKET: init_special_inode(inode, inode->i_mode | S_IFSOCK, 0); break; case ICBTAG_FILE_TYPE_SYMLINK: inode->i_data.a_ops = &udf_symlink_aops; inode->i_op = &udf_symlink_inode_operations; inode->i_mode = S_IFLNK | S_IRWXUGO; break; case ICBTAG_FILE_TYPE_MAIN: udf_debug("METADATA FILE-----\n"); break; case ICBTAG_FILE_TYPE_MIRROR: udf_debug("METADATA MIRROR FILE-----\n"); break; case ICBTAG_FILE_TYPE_BITMAP: udf_debug("METADATA BITMAP FILE-----\n"); break; default: udf_err(inode->i_sb, "(ino %ld) failed unknown 
file type=%d\n", inode->i_ino, fe->icbTag.fileType); make_bad_inode(inode); return; } if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1); if (dsea) { init_special_inode(inode, inode->i_mode, MKDEV(le32_to_cpu(dsea->majorDeviceIdent), le32_to_cpu(dsea->minorDeviceIdent))); /* Developer ID ??? */ } else make_bad_inode(inode); } brelse(bh); }
166,266
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7) { VP8Context *s = avctx->priv_data; int ret, i, referenced, num_jobs; enum AVDiscard skip_thresh; VP8Frame *av_uninit(curframe), *prev_frame; if (is_vp7) ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size); else ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size); if (ret < 0) goto err; prev_frame = s->framep[VP56_FRAME_CURRENT]; referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT || s->update_altref == VP56_FRAME_CURRENT; skip_thresh = !referenced ? AVDISCARD_NONREF : !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL; if (avctx->skip_frame >= skip_thresh) { s->invisible = 1; memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4); goto skip_decode; } s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh; for (i = 0; i < 5; i++) if (s->frames[i].tf.f->data[0] && &s->frames[i] != prev_frame && &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) vp8_release_frame(s, &s->frames[i]); curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s); if (!s->colorspace) avctx->colorspace = AVCOL_SPC_BT470BG; if (s->fullrange) avctx->color_range = AVCOL_RANGE_JPEG; else avctx->color_range = AVCOL_RANGE_MPEG; /* Given that arithmetic probabilities are updated every frame, it's quite * likely that the values we have on a random interframe are complete * junk if we didn't start decode on a keyframe. So just don't display * anything rather than junk. */ if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] || !s->framep[VP56_FRAME_GOLDEN] || !s->framep[VP56_FRAME_GOLDEN2])) { av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n"); ret = AVERROR_INVALIDDATA; goto err; } curframe->tf.f->key_frame = s->keyframe; curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0) goto err; if (s->update_altref != VP56_FRAME_NONE) s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref]; else s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2]; if (s->update_golden != VP56_FRAME_NONE) s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden]; else s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN]; if (s->update_last) s->next_framep[VP56_FRAME_PREVIOUS] = curframe; else s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS]; s->next_framep[VP56_FRAME_CURRENT] = curframe; if (avctx->codec->update_thread_context) ff_thread_finish_setup(avctx); s->linesize = curframe->tf.f->linesize[0]; s->uvlinesize = curframe->tf.f->linesize[1]; memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz)); /* Zero macroblock structures for top/top-left prediction * from outside the frame. 
*/ if (!s->mb_layout) memset(s->macroblocks + s->mb_height * 2 - 1, 0, (s->mb_width + 1) * sizeof(*s->macroblocks)); if (!s->mb_layout && s->keyframe) memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4); memset(s->ref_count, 0, sizeof(s->ref_count)); if (s->mb_layout == 1) { if (prev_frame && s->segmentation.enabled && !s->segmentation.update_map) ff_thread_await_progress(&prev_frame->tf, 1, 0); if (is_vp7) vp7_decode_mv_mb_modes(avctx, curframe, prev_frame); else vp8_decode_mv_mb_modes(avctx, curframe, prev_frame); } if (avctx->active_thread_type == FF_THREAD_FRAME) num_jobs = 1; else num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count); s->num_jobs = num_jobs; s->curframe = curframe; s->prev_frame = prev_frame; s->mv_bounds.mv_min.y = -MARGIN; s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN; for (i = 0; i < MAX_THREADS; i++) { VP8ThreadData *td = &s->thread_data[i]; atomic_init(&td->thread_mb_pos, 0); atomic_init(&td->wait_mb_pos, INT_MAX); } if (is_vp7) avctx->execute2(avctx, vp7_decode_mb_row_sliced, s->thread_data, NULL, num_jobs); else avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL, num_jobs); ff_thread_report_progress(&curframe->tf, INT_MAX, 0); memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4); skip_decode: if (!s->update_probabilities) s->prob[0] = s->prob[1]; if (!s->invisible) { if ((ret = av_frame_ref(data, curframe->tf.f)) < 0) return ret; *got_frame = 1; } return avpkt->size; err: memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4); return ret; } Commit Message: avcodec/webp: Always set pix_fmt Fixes: out of array access Fixes: 1434/clusterfuzz-testcase-minimized-6314998085189632 Fixes: 1435/clusterfuzz-testcase-minimized-6483783723253760 Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg Reviewed-by: "Ronald S. Bultje" <[email protected]> Signed-off-by: Michael Niedermayer <[email protected]> CWE ID: CWE-119
int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7) { VP8Context *s = avctx->priv_data; int ret, i, referenced, num_jobs; enum AVDiscard skip_thresh; VP8Frame *av_uninit(curframe), *prev_frame; av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVA420P || avctx->pix_fmt == AV_PIX_FMT_YUV420P); if (is_vp7) ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size); else ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size); if (ret < 0) goto err; prev_frame = s->framep[VP56_FRAME_CURRENT]; referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT || s->update_altref == VP56_FRAME_CURRENT; skip_thresh = !referenced ? AVDISCARD_NONREF : !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL; if (avctx->skip_frame >= skip_thresh) { s->invisible = 1; memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4); goto skip_decode; } s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh; for (i = 0; i < 5; i++) if (s->frames[i].tf.f->data[0] && &s->frames[i] != prev_frame && &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) vp8_release_frame(s, &s->frames[i]); curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s); if (!s->colorspace) avctx->colorspace = AVCOL_SPC_BT470BG; if (s->fullrange) avctx->color_range = AVCOL_RANGE_JPEG; else avctx->color_range = AVCOL_RANGE_MPEG; /* Given that arithmetic probabilities are updated every frame, it's quite * likely that the values we have on a random interframe are complete * junk if we didn't start decode on a keyframe. So just don't display * anything rather than junk. */ if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] || !s->framep[VP56_FRAME_GOLDEN] || !s->framep[VP56_FRAME_GOLDEN2])) { av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n"); ret = AVERROR_INVALIDDATA; goto err; } curframe->tf.f->key_frame = s->keyframe; curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0) goto err; if (s->update_altref != VP56_FRAME_NONE) s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref]; else s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2]; if (s->update_golden != VP56_FRAME_NONE) s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden]; else s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN]; if (s->update_last) s->next_framep[VP56_FRAME_PREVIOUS] = curframe; else s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS]; s->next_framep[VP56_FRAME_CURRENT] = curframe; if (avctx->codec->update_thread_context) ff_thread_finish_setup(avctx); s->linesize = curframe->tf.f->linesize[0]; s->uvlinesize = curframe->tf.f->linesize[1]; memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz)); /* Zero macroblock structures for top/top-left prediction * from outside the frame. 
*/ if (!s->mb_layout) memset(s->macroblocks + s->mb_height * 2 - 1, 0, (s->mb_width + 1) * sizeof(*s->macroblocks)); if (!s->mb_layout && s->keyframe) memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4); memset(s->ref_count, 0, sizeof(s->ref_count)); if (s->mb_layout == 1) { if (prev_frame && s->segmentation.enabled && !s->segmentation.update_map) ff_thread_await_progress(&prev_frame->tf, 1, 0); if (is_vp7) vp7_decode_mv_mb_modes(avctx, curframe, prev_frame); else vp8_decode_mv_mb_modes(avctx, curframe, prev_frame); } if (avctx->active_thread_type == FF_THREAD_FRAME) num_jobs = 1; else num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count); s->num_jobs = num_jobs; s->curframe = curframe; s->prev_frame = prev_frame; s->mv_bounds.mv_min.y = -MARGIN; s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN; for (i = 0; i < MAX_THREADS; i++) { VP8ThreadData *td = &s->thread_data[i]; atomic_init(&td->thread_mb_pos, 0); atomic_init(&td->wait_mb_pos, INT_MAX); } if (is_vp7) avctx->execute2(avctx, vp7_decode_mb_row_sliced, s->thread_data, NULL, num_jobs); else avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL, num_jobs); ff_thread_report_progress(&curframe->tf, INT_MAX, 0); memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4); skip_decode: if (!s->update_probabilities) s->prob[0] = s->prob[1]; if (!s->invisible) { if ((ret = av_frame_ref(data, curframe->tf.f)) < 0) return ret; *got_frame = 1; } return avpkt->size; err: memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4); return ret; }
168,071
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool MessageLoop::DoWork() { if (!nestable_tasks_allowed_) { return false; } for (;;) { ReloadWorkQueue(); if (work_queue_.empty()) break; do { PendingTask pending_task = std::move(work_queue_.front()); work_queue_.pop(); if (pending_task.task.IsCancelled()) { #if defined(OS_WIN) DecrementHighResTaskCountIfNeeded(pending_task); #endif } else if (!pending_task.delayed_run_time.is_null()) { int sequence_num = pending_task.sequence_num; TimeTicks delayed_run_time = pending_task.delayed_run_time; AddToDelayedWorkQueue(std::move(pending_task)); if (delayed_work_queue_.top().sequence_num == sequence_num) pump_->ScheduleDelayedWork(delayed_run_time); } else { if (DeferOrRunPendingTask(std::move(pending_task))) return true; } } while (!work_queue_.empty()); } return false; } Commit Message: Introduce RunLoop::Type::NESTABLE_TASKS_ALLOWED to replace MessageLoop::ScopedNestableTaskAllower. (as well as MessageLoop::SetNestableTasksAllowed()) Surveying usage: the scoped object is always instantiated right before RunLoop().Run(). The intent is really to allow nestable tasks in that RunLoop so it's better to explicitly label that RunLoop as such and it allows us to break the last dependency that forced some RunLoop users to use MessageLoop APIs. There's also the odd case of allowing nestable tasks for loops that are reentrant from a native task (without going through RunLoop), these are the minority but will have to be handled (after cleaning up the majority of cases that are RunLoop induced). As highlighted by robliao@ in https://chromium-review.googlesource.com/c/600517 (which was merged in this CL). [email protected] Bug: 750779 Change-Id: I43d122c93ec903cff3a6fe7b77ec461ea0656448 Reviewed-on: https://chromium-review.googlesource.com/594713 Commit-Queue: Gabriel Charette <[email protected]> Reviewed-by: Robert Liao <[email protected]> Reviewed-by: danakj <[email protected]> Cr-Commit-Position: refs/heads/master@{#492263} CWE ID:
bool MessageLoop::DoWork() { if (!NestableTasksAllowed()) { return false; } for (;;) { ReloadWorkQueue(); if (work_queue_.empty()) break; do { PendingTask pending_task = std::move(work_queue_.front()); work_queue_.pop(); if (pending_task.task.IsCancelled()) { #if defined(OS_WIN) DecrementHighResTaskCountIfNeeded(pending_task); #endif } else if (!pending_task.delayed_run_time.is_null()) { int sequence_num = pending_task.sequence_num; TimeTicks delayed_run_time = pending_task.delayed_run_time; AddToDelayedWorkQueue(std::move(pending_task)); if (delayed_work_queue_.top().sequence_num == sequence_num) pump_->ScheduleDelayedWork(delayed_run_time); } else { if (DeferOrRunPendingTask(std::move(pending_task))) return true; } } while (!work_queue_.empty()); } return false; }
171,864
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void DiscardAndExplicitlyReloadTest(DiscardReason reason) { LifecycleUnit* background_lifecycle_unit = nullptr; LifecycleUnit* foreground_lifecycle_unit = nullptr; CreateTwoTabs(true /* focus_tab_strip */, &background_lifecycle_unit, &foreground_lifecycle_unit); content::WebContents* initial_web_contents = tab_strip_model_->GetWebContentsAt(0); EXPECT_EQ(LifecycleUnitState::ACTIVE, background_lifecycle_unit->GetState()); EXPECT_CALL(tab_observer_, OnDiscardedStateChange(testing::_, true)); background_lifecycle_unit->Discard(reason); testing::Mock::VerifyAndClear(&tab_observer_); TransitionFromPendingDiscardToDiscardedIfNeeded(reason, background_lifecycle_unit); EXPECT_NE(initial_web_contents, tab_strip_model_->GetWebContentsAt(0)); EXPECT_FALSE(tab_strip_model_->GetWebContentsAt(0) ->GetController() .GetPendingEntry()); EXPECT_CALL(tab_observer_, OnDiscardedStateChange(testing::_, false)); tab_strip_model_->GetWebContentsAt(0)->GetController().Reload( content::ReloadType::NORMAL, false); testing::Mock::VerifyAndClear(&tab_observer_); EXPECT_EQ(LifecycleUnitState::ACTIVE, background_lifecycle_unit->GetState()); EXPECT_TRUE(tab_strip_model_->GetWebContentsAt(0) ->GetController() .GetPendingEntry()); } Commit Message: Connect the LocalDB to TabManager. Bug: 773382 Change-Id: Iec8fe5226ee175105d51f300f30b4865478ac099 Reviewed-on: https://chromium-review.googlesource.com/1118611 Commit-Queue: Sébastien Marchand <[email protected]> Reviewed-by: François Doray <[email protected]> Cr-Commit-Position: refs/heads/master@{#572871} CWE ID:
void DiscardAndExplicitlyReloadTest(DiscardReason reason) { LifecycleUnit* background_lifecycle_unit = nullptr; LifecycleUnit* foreground_lifecycle_unit = nullptr; CreateTwoTabs(true /* focus_tab_strip */, &background_lifecycle_unit, &foreground_lifecycle_unit); content::WebContents* initial_web_contents = tab_strip_model_->GetWebContentsAt(0); EXPECT_EQ(LifecycleUnitState::ACTIVE, background_lifecycle_unit->GetState()); EXPECT_CALL(tab_observer_, OnDiscardedStateChange(::testing::_, true)); background_lifecycle_unit->Discard(reason); ::testing::Mock::VerifyAndClear(&tab_observer_); TransitionFromPendingDiscardToDiscardedIfNeeded(reason, background_lifecycle_unit); EXPECT_NE(initial_web_contents, tab_strip_model_->GetWebContentsAt(0)); EXPECT_FALSE(tab_strip_model_->GetWebContentsAt(0) ->GetController() .GetPendingEntry()); EXPECT_CALL(tab_observer_, OnDiscardedStateChange(::testing::_, false)); tab_strip_model_->GetWebContentsAt(0)->GetController().Reload( content::ReloadType::NORMAL, false); ::testing::Mock::VerifyAndClear(&tab_observer_); EXPECT_EQ(LifecycleUnitState::ACTIVE, background_lifecycle_unit->GetState()); EXPECT_TRUE(tab_strip_model_->GetWebContentsAt(0) ->GetController() .GetPendingEntry()); }
172,225
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: Resource::Resource(PluginInstance* instance) : resource_id_(0), instance_(instance) { } Commit Message: Maintain a map of all resources in the resource tracker and clear instance back pointers when needed, BUG=85808 Review URL: http://codereview.chromium.org/7196001 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89746 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
Resource::Resource(PluginInstance* instance) : resource_id_(0), instance_(instance) { ResourceTracker::Get()->ResourceCreated(this, instance_); }
170,414
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) { if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data)); memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data)); switch (open_flags) { case FMODE_READ: set_bit(NFS_O_RDONLY_STATE, &state->flags); break; case FMODE_WRITE: set_bit(NFS_O_WRONLY_STATE, &state->flags); break; case FMODE_READ|FMODE_WRITE: set_bit(NFS_O_RDWR_STATE, &state->flags); } } Commit Message: NFSv4: Convert the open and close ops to use fmode Signed-off-by: Trond Myklebust <[email protected]> CWE ID:
static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data)); memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data)); switch (fmode) { case FMODE_READ: set_bit(NFS_O_RDONLY_STATE, &state->flags); break; case FMODE_WRITE: set_bit(NFS_O_WRONLY_STATE, &state->flags); break; case FMODE_READ|FMODE_WRITE: set_bit(NFS_O_RDWR_STATE, &state->flags); } }
165,706
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int hugetlb_get_quota(struct address_space *mapping, long delta) { int ret = 0; struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb); if (sbinfo->free_blocks > -1) { spin_lock(&sbinfo->stat_lock); if (sbinfo->free_blocks - delta >= 0) sbinfo->free_blocks -= delta; else ret = -ENOMEM; spin_unlock(&sbinfo->stat_lock); } return ret; } Commit Message: hugepages: fix use after free bug in "quota" handling hugetlbfs_{get,put}_quota() are badly named. They don't interact with the general quota handling code, and they don't much resemble its behaviour. Rather than being about maintaining limits on on-disk block usage by particular users, they are instead about maintaining limits on in-memory page usage (including anonymous MAP_PRIVATE copied-on-write pages) associated with a particular hugetlbfs filesystem instance. Worse, they work by having callbacks to the hugetlbfs filesystem code from the low-level page handling code, in particular from free_huge_page(). This is a layering violation of itself, but more importantly, if the kernel does a get_user_pages() on hugepages (which can happen from KVM amongst others), then the free_huge_page() can be delayed until after the associated inode has already been freed. If an unmount occurs at the wrong time, even the hugetlbfs superblock where the "quota" limits are stored may have been freed. Andrew Barry proposed a patch to fix this by having hugepages, instead of storing a pointer to their address_space and reaching the superblock from there, had the hugepages store pointers directly to the superblock, bumping the reference count as appropriate to avoid it being freed. Andrew Morton rejected that version, however, on the grounds that it made the existing layering violation worse. This is a reworked version of Andrew's patch, which removes the extra, and some of the existing, layering violation. It works by introducing the concept of a hugepage "subpool" at the lower hugepage mm layer - that is a finite logical pool of hugepages to allocate from. hugetlbfs now creates a subpool for each filesystem instance with a page limit set, and a pointer to the subpool gets added to each allocated hugepage, instead of the address_space pointer used now. The subpool has its own lifetime and is only freed once all pages in it _and_ all other references to it (i.e. superblocks) are gone. subpools are optional - a NULL subpool pointer is taken by the code to mean that no subpool limits are in effect. Previous discussion of this bug found in: "Fix refcounting in hugetlbfs quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or http://marc.info/?l=linux-mm&m=126928970510627&w=1 v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to alloc_huge_page() - since it already takes the vma, it is not necessary. Signed-off-by: Andrew Barry <[email protected]> Signed-off-by: David Gibson <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Minchan Kim <[email protected]> Cc: Hillf Danton <[email protected]> Cc: Paul Mackerras <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-399
int hugetlb_get_quota(struct address_space *mapping, long delta)
165,603
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void FileBrowserHandlerCustomBindings::GetExternalFileEntry( const v8::FunctionCallbackInfo<v8::Value>& args) { //// TODO(zelidrag): Make this magic work on other platforms when file browser //// matures enough on ChromeOS. #if defined(OS_CHROMEOS) CHECK(args.Length() == 1); CHECK(args[0]->IsObject()); v8::Local<v8::Object> file_def = args[0]->ToObject(); std::string file_system_name( *v8::String::Utf8Value(file_def->Get( v8::String::NewFromUtf8(args.GetIsolate(), "fileSystemName")))); GURL file_system_root( *v8::String::Utf8Value(file_def->Get( v8::String::NewFromUtf8(args.GetIsolate(), "fileSystemRoot")))); std::string file_full_path( *v8::String::Utf8Value(file_def->Get( v8::String::NewFromUtf8(args.GetIsolate(), "fileFullPath")))); bool is_directory = file_def->Get(v8::String::NewFromUtf8( args.GetIsolate(), "fileIsDirectory"))->ToBoolean()->Value(); blink::WebDOMFileSystem::EntryType entry_type = is_directory ? blink::WebDOMFileSystem::EntryTypeDirectory : blink::WebDOMFileSystem::EntryTypeFile; blink::WebLocalFrame* webframe = blink::WebLocalFrame::frameForContext(context()->v8_context()); args.GetReturnValue().Set( blink::WebDOMFileSystem::create( webframe, blink::WebFileSystemTypeExternal, blink::WebString::fromUTF8(file_system_name), file_system_root) .createV8Entry(blink::WebString::fromUTF8(file_full_path), entry_type, args.Holder(), args.GetIsolate())); #endif } Commit Message: [Extensions] Add more bindings access checks BUG=598165 Review URL: https://codereview.chromium.org/1854983002 Cr-Commit-Position: refs/heads/master@{#385282} CWE ID:
void FileBrowserHandlerCustomBindings::GetExternalFileEntry( const v8::FunctionCallbackInfo<v8::Value>& args, ScriptContext* context) { //// TODO(zelidrag): Make this magic work on other platforms when file browser //// matures enough on ChromeOS. #if defined(OS_CHROMEOS) CHECK(args.Length() == 1); CHECK(args[0]->IsObject()); v8::Local<v8::Object> file_def = args[0]->ToObject(); std::string file_system_name( *v8::String::Utf8Value(file_def->Get( v8::String::NewFromUtf8(args.GetIsolate(), "fileSystemName")))); GURL file_system_root( *v8::String::Utf8Value(file_def->Get( v8::String::NewFromUtf8(args.GetIsolate(), "fileSystemRoot")))); std::string file_full_path( *v8::String::Utf8Value(file_def->Get( v8::String::NewFromUtf8(args.GetIsolate(), "fileFullPath")))); bool is_directory = file_def->Get(v8::String::NewFromUtf8( args.GetIsolate(), "fileIsDirectory"))->ToBoolean()->Value(); blink::WebDOMFileSystem::EntryType entry_type = is_directory ? blink::WebDOMFileSystem::EntryTypeDirectory : blink::WebDOMFileSystem::EntryTypeFile; blink::WebLocalFrame* webframe = blink::WebLocalFrame::frameForContext(context->v8_context()); args.GetReturnValue().Set( blink::WebDOMFileSystem::create( webframe, blink::WebFileSystemTypeExternal, blink::WebString::fromUTF8(file_system_name), file_system_root) .createV8Entry(blink::WebString::fromUTF8(file_full_path), entry_type, args.Holder(), args.GetIsolate())); #endif }
173,273
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: png_write_find_filter(png_structp png_ptr, png_row_infop row_info) { png_bytep best_row; #ifdef PNG_WRITE_FILTER_SUPPORTED png_bytep prev_row, row_buf; png_uint_32 mins, bpp; png_byte filter_to_do = png_ptr->do_filter; png_uint_32 row_bytes = row_info->rowbytes; #ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED int num_p_filters = (int)png_ptr->num_prev_filters; #endif png_debug(1, "in png_write_find_filter"); #ifndef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED if (png_ptr->row_number == 0 && filter_to_do == PNG_ALL_FILTERS) { /* These will never be selected so we need not test them. */ filter_to_do &= ~(PNG_FILTER_UP | PNG_FILTER_PAETH); } #endif /* Find out how many bytes offset each pixel is */ bpp = (row_info->pixel_depth + 7) >> 3; prev_row = png_ptr->prev_row; #endif best_row = png_ptr->row_buf; #ifdef PNG_WRITE_FILTER_SUPPORTED row_buf = best_row; mins = PNG_MAXSUM; /* The prediction method we use is to find which method provides the * smallest value when summing the absolute values of the distances * from zero, using anything >= 128 as negative numbers. This is known * as the "minimum sum of absolute differences" heuristic. Other * heuristics are the "weighted minimum sum of absolute differences" * (experimental and can in theory improve compression), and the "zlib * predictive" method (not implemented yet), which does test compressions * of lines using different filter methods, and then chooses the * (series of) filter(s) that give minimum compressed data size (VERY * computationally expensive). * * GRR 980525: consider also * (1) minimum sum of absolute differences from running average (i.e., * keep running sum of non-absolute differences & count of bytes) * [track dispersion, too? restart average if dispersion too large?] * (1b) minimum sum of absolute differences from sliding average, probably * with window size <= deflate window (usually 32K) * (2) minimum sum of squared differences from zero or running average * (i.e., ~ root-mean-square approach) */ /* We don't need to test the 'no filter' case if this is the only filter * that has been chosen, as it doesn't actually do anything to the data. */ if ((filter_to_do & PNG_FILTER_NONE) && filter_to_do != PNG_FILTER_NONE) { png_bytep rp; png_uint_32 sum = 0; png_uint_32 i; int v; for (i = 0, rp = row_buf + 1; i < row_bytes; i++, rp++) { v = *rp; sum += (v < 128) ? v : 256 - v; } #ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED if (png_ptr->heuristic_method == PNG_FILTER_HEURISTIC_WEIGHTED) { png_uint_32 sumhi, sumlo; int j; sumlo = sum & PNG_LOMASK; sumhi = (sum >> PNG_HISHIFT) & PNG_HIMASK; /* Gives us some footroom */ /* Reduce the sum if we match any of the previous rows */ for (j = 0; j < num_p_filters; j++) { if (png_ptr->prev_filters[j] == PNG_FILTER_VALUE_NONE) { sumlo = (sumlo * png_ptr->filter_weights[j]) >> PNG_WEIGHT_SHIFT; sumhi = (sumhi * png_ptr->filter_weights[j]) >> PNG_WEIGHT_SHIFT; } } /* Factor in the cost of this filter (this is here for completeness, * but it makes no sense to have a "cost" for the NONE filter, as * it has the minimum possible computational cost - none). 
*/ sumlo = (sumlo * png_ptr->filter_costs[PNG_FILTER_VALUE_NONE]) >> PNG_COST_SHIFT; sumhi = (sumhi * png_ptr->filter_costs[PNG_FILTER_VALUE_NONE]) >> PNG_COST_SHIFT; if (sumhi > PNG_HIMASK) sum = PNG_MAXSUM; else sum = (sumhi << PNG_HISHIFT) + sumlo; } #endif mins = sum; } /* Sub filter */ if (filter_to_do == PNG_FILTER_SUB) /* It's the only filter so no testing is needed */ { png_bytep rp, lp, dp; png_uint_32 i; for (i = 0, rp = row_buf + 1, dp = png_ptr->sub_row + 1; i < bpp; i++, rp++, dp++) { *dp = *rp; } for (lp = row_buf + 1; i < row_bytes; i++, rp++, lp++, dp++) { *dp = (png_byte)(((int)*rp - (int)*lp) & 0xff); } best_row = png_ptr->sub_row; } else if (filter_to_do & PNG_FILTER_SUB) { png_bytep rp, dp, lp; png_uint_32 sum = 0, lmins = mins; png_uint_32 i; int v; #ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED /* We temporarily increase the "minimum sum" by the factor we * would reduce the sum of this filter, so that we can do the * early exit comparison without scaling the sum each time. */ if (png_ptr->heuristic_method == PNG_FILTER_HEURISTIC_WEIGHTED) { int j; png_uint_32 lmhi, lmlo; lmlo = lmins & PNG_LOMASK; lmhi = (lmins >> PNG_HISHIFT) & PNG_HIMASK; for (j = 0; j < num_p_filters; j++) { if (png_ptr->prev_filters[j] == PNG_FILTER_VALUE_SUB) { lmlo = (lmlo * png_ptr->inv_filter_weights[j]) >> PNG_WEIGHT_SHIFT; lmhi = (lmhi * png_ptr->inv_filter_weights[j]) >> PNG_WEIGHT_SHIFT; } } lmlo = (lmlo * png_ptr->inv_filter_costs[PNG_FILTER_VALUE_SUB]) >> PNG_COST_SHIFT; lmhi = (lmhi * png_ptr->inv_filter_costs[PNG_FILTER_VALUE_SUB]) >> PNG_COST_SHIFT; if (lmhi > PNG_HIMASK) lmins = PNG_MAXSUM; else lmins = (lmhi << PNG_HISHIFT) + lmlo; } #endif for (i = 0, rp = row_buf + 1, dp = png_ptr->sub_row + 1; i < bpp; i++, rp++, dp++) { v = *dp = *rp; sum += (v < 128) ? v : 256 - v; } for (lp = row_buf + 1; i < row_bytes; i++, rp++, lp++, dp++) { v = *dp = (png_byte)(((int)*rp - (int)*lp) & 0xff); sum += (v < 128) ? v : 256 - v; if (sum > lmins) /* We are already worse, don't continue. 
*/ break; } #ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED if (png_ptr->heuristic_method == PNG_FILTER_HEURISTIC_WEIGHTED) { int j; png_uint_32 sumhi, sumlo; sumlo = sum & PNG_LOMASK; sumhi = (sum >> PNG_HISHIFT) & PNG_HIMASK; for (j = 0; j < num_p_filters; j++) { if (png_ptr->prev_filters[j] == PNG_FILTER_VALUE_SUB) { sumlo = (sumlo * png_ptr->inv_filter_weights[j]) >> PNG_WEIGHT_SHIFT; sumhi = (sumhi * png_ptr->inv_filter_weights[j]) >> PNG_WEIGHT_SHIFT; } } sumlo = (sumlo * png_ptr->inv_filter_costs[PNG_FILTER_VALUE_SUB]) >> PNG_COST_SHIFT; sumhi = (sumhi * png_ptr->inv_filter_costs[PNG_FILTER_VALUE_SUB]) >> PNG_COST_SHIFT; if (sumhi > PNG_HIMASK) sum = PNG_MAXSUM; else sum = (sumhi << PNG_HISHIFT) + sumlo; } #endif if (sum < mins) { mins = sum; best_row = png_ptr->sub_row; } } /* Up filter */ if (filter_to_do == PNG_FILTER_UP) { png_bytep rp, dp, pp; png_uint_32 i; for (i = 0, rp = row_buf + 1, dp = png_ptr->up_row + 1, pp = prev_row + 1; i < row_bytes; i++, rp++, pp++, dp++) { *dp = (png_byte)(((int)*rp - (int)*pp) & 0xff); } best_row = png_ptr->up_row; } else if (filter_to_do & PNG_FILTER_UP) { png_bytep rp, dp, pp; png_uint_32 sum = 0, lmins = mins; png_uint_32 i; int v; #ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED if (png_ptr->heuristic_method == PNG_FILTER_HEURISTIC_WEIGHTED) { int j; png_uint_32 lmhi, lmlo; lmlo = lmins & PNG_LOMASK; lmhi = (lmins >> PNG_HISHIFT) & PNG_HIMASK; for (j = 0; j < num_p_filters; j++) { if (png_ptr->prev_filters[j] == PNG_FILTER_VALUE_UP) { lmlo = (lmlo * png_ptr->inv_filter_weights[j]) >> PNG_WEIGHT_SHIFT; lmhi = (lmhi * png_ptr->inv_filter_weights[j]) >> PNG_WEIGHT_SHIFT; } } lmlo = (lmlo * png_ptr->inv_filter_costs[PNG_FILTER_VALUE_UP]) >> PNG_COST_SHIFT; lmhi = (lmhi * png_ptr->inv_filter_costs[PNG_FILTER_VALUE_UP]) >> PNG_COST_SHIFT; if (lmhi > PNG_HIMASK) lmins = PNG_MAXSUM; else lmins = (lmhi << PNG_HISHIFT) + lmlo; } #endif for (i = 0, rp = row_buf + 1, dp = png_ptr->up_row + 1, pp = prev_row + 1; i < row_bytes; i++) { v = *dp++ = (png_byte)(((int)*rp++ - (int)*pp++) & 0xff); sum += (v < 128) ? v : 256 - v; if (sum > lmins) /* We are already worse, don't continue. 
*/ break; } #ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED if (png_ptr->heuristic_method == PNG_FILTER_HEURISTIC_WEIGHTED) { int j; png_uint_32 sumhi, sumlo; sumlo = sum & PNG_LOMASK; sumhi = (sum >> PNG_HISHIFT) & PNG_HIMASK; for (j = 0; j < num_p_filters; j++) { if (png_ptr->prev_filters[j] == PNG_FILTER_VALUE_UP) { sumlo = (sumlo * png_ptr->filter_weights[j]) >> PNG_WEIGHT_SHIFT; sumhi = (sumhi * png_ptr->filter_weights[j]) >> PNG_WEIGHT_SHIFT; } } sumlo = (sumlo * png_ptr->filter_costs[PNG_FILTER_VALUE_UP]) >> PNG_COST_SHIFT; sumhi = (sumhi * png_ptr->filter_costs[PNG_FILTER_VALUE_UP]) >> PNG_COST_SHIFT; if (sumhi > PNG_HIMASK) sum = PNG_MAXSUM; else sum = (sumhi << PNG_HISHIFT) + sumlo; } #endif if (sum < mins) { mins = sum; best_row = png_ptr->up_row; } } /* Avg filter */ if (filter_to_do == PNG_FILTER_AVG) { png_bytep rp, dp, pp, lp; png_uint_32 i; for (i = 0, rp = row_buf + 1, dp = png_ptr->avg_row + 1, pp = prev_row + 1; i < bpp; i++) { *dp++ = (png_byte)(((int)*rp++ - ((int)*pp++ / 2)) & 0xff); } for (lp = row_buf + 1; i < row_bytes; i++) { *dp++ = (png_byte)(((int)*rp++ - (((int)*pp++ + (int)*lp++) / 2)) & 0xff); } best_row = png_ptr->avg_row; } else if (filter_to_do & PNG_FILTER_AVG) { png_bytep rp, dp, pp, lp; png_uint_32 sum = 0, lmins = mins; png_uint_32 i; int v; #ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED if (png_ptr->heuristic_method == PNG_FILTER_HEURISTIC_WEIGHTED) { int j; png_uint_32 lmhi, lmlo; lmlo = lmins & PNG_LOMASK; lmhi = (lmins >> PNG_HISHIFT) & PNG_HIMASK; for (j = 0; j < num_p_filters; j++) { if (png_ptr->prev_filters[j] == PNG_FILTER_VALUE_AVG) { lmlo = (lmlo * png_ptr->inv_filter_weights[j]) >> PNG_WEIGHT_SHIFT; lmhi = (lmhi * png_ptr->inv_filter_weights[j]) >> PNG_WEIGHT_SHIFT; } } lmlo = (lmlo * png_ptr->inv_filter_costs[PNG_FILTER_VALUE_AVG]) >> PNG_COST_SHIFT; lmhi = (lmhi * png_ptr->inv_filter_costs[PNG_FILTER_VALUE_AVG]) >> PNG_COST_SHIFT; if (lmhi > PNG_HIMASK) lmins = PNG_MAXSUM; else lmins = (lmhi << PNG_HISHIFT) + lmlo; } #endif for (i = 0, rp = row_buf + 1, dp = png_ptr->avg_row + 1, pp = prev_row + 1; i < bpp; i++) { v = *dp++ = (png_byte)(((int)*rp++ - ((int)*pp++ / 2)) & 0xff); sum += (v < 128) ? v : 256 - v; } for (lp = row_buf + 1; i < row_bytes; i++) { v = *dp++ = (png_byte)(((int)*rp++ - (((int)*pp++ + (int)*lp++) / 2)) & 0xff); sum += (v < 128) ? v : 256 - v; if (sum > lmins) /* We are already worse, don't continue. 
*/ break; } #ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED if (png_ptr->heuristic_method == PNG_FILTER_HEURISTIC_WEIGHTED) { int j; png_uint_32 sumhi, sumlo; sumlo = sum & PNG_LOMASK; sumhi = (sum >> PNG_HISHIFT) & PNG_HIMASK; for (j = 0; j < num_p_filters; j++) { if (png_ptr->prev_filters[j] == PNG_FILTER_VALUE_NONE) { sumlo = (sumlo * png_ptr->filter_weights[j]) >> PNG_WEIGHT_SHIFT; sumhi = (sumhi * png_ptr->filter_weights[j]) >> PNG_WEIGHT_SHIFT; } } sumlo = (sumlo * png_ptr->filter_costs[PNG_FILTER_VALUE_AVG]) >> PNG_COST_SHIFT; sumhi = (sumhi * png_ptr->filter_costs[PNG_FILTER_VALUE_AVG]) >> PNG_COST_SHIFT; if (sumhi > PNG_HIMASK) sum = PNG_MAXSUM; else sum = (sumhi << PNG_HISHIFT) + sumlo; } #endif if (sum < mins) { mins = sum; best_row = png_ptr->avg_row; } } /* Paeth filter */ if (filter_to_do == PNG_FILTER_PAETH) { png_bytep rp, dp, pp, cp, lp; png_uint_32 i; for (i = 0, rp = row_buf + 1, dp = png_ptr->paeth_row + 1, pp = prev_row + 1; i < bpp; i++) { *dp++ = (png_byte)(((int)*rp++ - (int)*pp++) & 0xff); } for (lp = row_buf + 1, cp = prev_row + 1; i < row_bytes; i++) { int a, b, c, pa, pb, pc, p; b = *pp++; c = *cp++; a = *lp++; p = b - c; pc = a - c; #ifdef PNG_USE_ABS pa = abs(p); pb = abs(pc); pc = abs(p + pc); #else pa = p < 0 ? -p : p; pb = pc < 0 ? -pc : pc; pc = (p + pc) < 0 ? -(p + pc) : p + pc; #endif p = (pa <= pb && pa <=pc) ? a : (pb <= pc) ? b : c; *dp++ = (png_byte)(((int)*rp++ - p) & 0xff); } best_row = png_ptr->paeth_row; } else if (filter_to_do & PNG_FILTER_PAETH) { png_bytep rp, dp, pp, cp, lp; png_uint_32 sum = 0, lmins = mins; png_uint_32 i; int v; #ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED if (png_ptr->heuristic_method == PNG_FILTER_HEURISTIC_WEIGHTED) { int j; png_uint_32 lmhi, lmlo; lmlo = lmins & PNG_LOMASK; lmhi = (lmins >> PNG_HISHIFT) & PNG_HIMASK; for (j = 0; j < num_p_filters; j++) { if (png_ptr->prev_filters[j] == PNG_FILTER_VALUE_PAETH) { lmlo = (lmlo * png_ptr->inv_filter_weights[j]) >> PNG_WEIGHT_SHIFT; lmhi = (lmhi * png_ptr->inv_filter_weights[j]) >> PNG_WEIGHT_SHIFT; } } lmlo = (lmlo * png_ptr->inv_filter_costs[PNG_FILTER_VALUE_PAETH]) >> PNG_COST_SHIFT; lmhi = (lmhi * png_ptr->inv_filter_costs[PNG_FILTER_VALUE_PAETH]) >> PNG_COST_SHIFT; if (lmhi > PNG_HIMASK) lmins = PNG_MAXSUM; else lmins = (lmhi << PNG_HISHIFT) + lmlo; } #endif for (i = 0, rp = row_buf + 1, dp = png_ptr->paeth_row + 1, pp = prev_row + 1; i < bpp; i++) { v = *dp++ = (png_byte)(((int)*rp++ - (int)*pp++) & 0xff); sum += (v < 128) ? v : 256 - v; } for (lp = row_buf + 1, cp = prev_row + 1; i < row_bytes; i++) { int a, b, c, pa, pb, pc, p; b = *pp++; c = *cp++; a = *lp++; #ifndef PNG_SLOW_PAETH p = b - c; pc = a - c; #ifdef PNG_USE_ABS pa = abs(p); pb = abs(pc); pc = abs(p + pc); #else pa = p < 0 ? -p : p; pb = pc < 0 ? -pc : pc; pc = (p + pc) < 0 ? -(p + pc) : p + pc; #endif p = (pa <= pb && pa <=pc) ? a : (pb <= pc) ? b : c; #else /* PNG_SLOW_PAETH */ p = a + b - c; pa = abs(p - a); pb = abs(p - b); pc = abs(p - c); if (pa <= pb && pa <= pc) p = a; else if (pb <= pc) p = b; else p = c; #endif /* PNG_SLOW_PAETH */ v = *dp++ = (png_byte)(((int)*rp++ - p) & 0xff); sum += (v < 128) ? v : 256 - v; if (sum > lmins) /* We are already worse, don't continue. 
*/ break; } #ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED if (png_ptr->heuristic_method == PNG_FILTER_HEURISTIC_WEIGHTED) { int j; png_uint_32 sumhi, sumlo; sumlo = sum & PNG_LOMASK; sumhi = (sum >> PNG_HISHIFT) & PNG_HIMASK; for (j = 0; j < num_p_filters; j++) { if (png_ptr->prev_filters[j] == PNG_FILTER_VALUE_PAETH) { sumlo = (sumlo * png_ptr->filter_weights[j]) >> PNG_WEIGHT_SHIFT; sumhi = (sumhi * png_ptr->filter_weights[j]) >> PNG_WEIGHT_SHIFT; } } sumlo = (sumlo * png_ptr->filter_costs[PNG_FILTER_VALUE_PAETH]) >> PNG_COST_SHIFT; sumhi = (sumhi * png_ptr->filter_costs[PNG_FILTER_VALUE_PAETH]) >> PNG_COST_SHIFT; if (sumhi > PNG_HIMASK) sum = PNG_MAXSUM; else sum = (sumhi << PNG_HISHIFT) + sumlo; } #endif if (sum < mins) { best_row = png_ptr->paeth_row; } } #endif /* PNG_WRITE_FILTER_SUPPORTED */ /* Do the actual writing of the filtered row data from the chosen filter. */ png_write_filtered_row(png_ptr, best_row); #ifdef PNG_WRITE_FILTER_SUPPORTED #ifdef PNG_WRITE_WEIGHTED_FILTER_SUPPORTED /* Save the type of filter we picked this time for future calculations */ if (png_ptr->num_prev_filters > 0) { int j; for (j = 1; j < num_p_filters; j++) { png_ptr->prev_filters[j] = png_ptr->prev_filters[j - 1]; } png_ptr->prev_filters[j] = best_row[0]; } #endif #endif /* PNG_WRITE_FILTER_SUPPORTED */ } Commit Message: third_party/libpng: update to 1.2.54 [email protected] BUG=560291 Review URL: https://codereview.chromium.org/1467263003 Cr-Commit-Position: refs/heads/master@{#362298} CWE ID: CWE-119
png_write_find_filter(png_structp png_ptr, png_row_infop row_info) { png_bytep best_row; #ifdef PNG_WRITE_FILTER_SUPPORTED png_bytep prev_row, row_buf; png_uint_32 mins, bpp; png_byte filter_to_do = png_ptr->do_filter; png_uint_32 row_bytes = row_info->rowbytes; png_debug(1, "in png_write_find_filter"); /* Find out how many bytes offset each pixel is */ bpp = (row_info->pixel_depth + 7) >> 3; prev_row = png_ptr->prev_row; #endif best_row = png_ptr->row_buf; #ifdef PNG_WRITE_FILTER_SUPPORTED row_buf = best_row; mins = PNG_MAXSUM; /* The prediction method we use is to find which method provides the * smallest value when summing the absolute values of the distances * from zero, using anything >= 128 as negative numbers. This is known * as the "minimum sum of absolute differences" heuristic. Other * heuristics are the "weighted minimum sum of absolute differences" * (experimental and can in theory improve compression), and the "zlib * predictive" method (not implemented yet), which does test compressions * of lines using different filter methods, and then chooses the * (series of) filter(s) that give minimum compressed data size (VERY * computationally expensive). * * GRR 980525: consider also * (1) minimum sum of absolute differences from running average (i.e., * keep running sum of non-absolute differences & count of bytes) * [track dispersion, too? restart average if dispersion too large?] * (1b) minimum sum of absolute differences from sliding average, probably * with window size <= deflate window (usually 32K) * (2) minimum sum of squared differences from zero or running average * (i.e., ~ root-mean-square approach) */ /* We don't need to test the 'no filter' case if this is the only filter * that has been chosen, as it doesn't actually do anything to the data. */ if ((filter_to_do & PNG_FILTER_NONE) && filter_to_do != PNG_FILTER_NONE) { png_bytep rp; png_uint_32 sum = 0; png_uint_32 i; int v; for (i = 0, rp = row_buf + 1; i < row_bytes; i++, rp++) { v = *rp; sum += (v < 128) ? v : 256 - v; } mins = sum; } /* Sub filter */ if (filter_to_do == PNG_FILTER_SUB) /* It's the only filter so no testing is needed */ { png_bytep rp, lp, dp; png_uint_32 i; for (i = 0, rp = row_buf + 1, dp = png_ptr->sub_row + 1; i < bpp; i++, rp++, dp++) { *dp = *rp; } for (lp = row_buf + 1; i < row_bytes; i++, rp++, lp++, dp++) { *dp = (png_byte)(((int)*rp - (int)*lp) & 0xff); } best_row = png_ptr->sub_row; } else if (filter_to_do & PNG_FILTER_SUB) { png_bytep rp, dp, lp; png_uint_32 sum = 0, lmins = mins; png_uint_32 i; int v; for (i = 0, rp = row_buf + 1, dp = png_ptr->sub_row + 1; i < bpp; i++, rp++, dp++) { v = *dp = *rp; sum += (v < 128) ? v : 256 - v; } for (lp = row_buf + 1; i < row_bytes; i++, rp++, lp++, dp++) { v = *dp = (png_byte)(((int)*rp - (int)*lp) & 0xff); sum += (v < 128) ? v : 256 - v; if (sum > lmins) /* We are already worse, don't continue. */ break; } if (sum < mins) { mins = sum; best_row = png_ptr->sub_row; } } /* Up filter */ if (filter_to_do == PNG_FILTER_UP) { png_bytep rp, dp, pp; png_uint_32 i; for (i = 0, rp = row_buf + 1, dp = png_ptr->up_row + 1, pp = prev_row + 1; i < row_bytes; i++, rp++, pp++, dp++) { *dp = (png_byte)(((int)*rp - (int)*pp) & 0xff); } best_row = png_ptr->up_row; } else if (filter_to_do & PNG_FILTER_UP) { png_bytep rp, dp, pp; png_uint_32 sum = 0, lmins = mins; png_uint_32 i; int v; for (i = 0, rp = row_buf + 1, dp = png_ptr->up_row + 1, pp = prev_row + 1; i < row_bytes; i++) { v = *dp++ = (png_byte)(((int)*rp++ - (int)*pp++) & 0xff); sum += (v < 128) ? 
v : 256 - v; if (sum > lmins) /* We are already worse, don't continue. */ break; } if (sum < mins) { mins = sum; best_row = png_ptr->up_row; } } /* Avg filter */ if (filter_to_do == PNG_FILTER_AVG) { png_bytep rp, dp, pp, lp; png_uint_32 i; for (i = 0, rp = row_buf + 1, dp = png_ptr->avg_row + 1, pp = prev_row + 1; i < bpp; i++) { *dp++ = (png_byte)(((int)*rp++ - ((int)*pp++ / 2)) & 0xff); } for (lp = row_buf + 1; i < row_bytes; i++) { *dp++ = (png_byte)(((int)*rp++ - (((int)*pp++ + (int)*lp++) / 2)) & 0xff); } best_row = png_ptr->avg_row; } else if (filter_to_do & PNG_FILTER_AVG) { png_bytep rp, dp, pp, lp; png_uint_32 sum = 0, lmins = mins; png_uint_32 i; int v; for (i = 0, rp = row_buf + 1, dp = png_ptr->avg_row + 1, pp = prev_row + 1; i < bpp; i++) { v = *dp++ = (png_byte)(((int)*rp++ - ((int)*pp++ / 2)) & 0xff); sum += (v < 128) ? v : 256 - v; } for (lp = row_buf + 1; i < row_bytes; i++) { v = *dp++ = (png_byte)(((int)*rp++ - (((int)*pp++ + (int)*lp++) / 2)) & 0xff); sum += (v < 128) ? v : 256 - v; if (sum > lmins) /* We are already worse, don't continue. */ break; } if (sum < mins) { mins = sum; best_row = png_ptr->avg_row; } } /* Paeth filter */ if (filter_to_do == PNG_FILTER_PAETH) { png_bytep rp, dp, pp, cp, lp; png_uint_32 i; for (i = 0, rp = row_buf + 1, dp = png_ptr->paeth_row + 1, pp = prev_row + 1; i < bpp; i++) { *dp++ = (png_byte)(((int)*rp++ - (int)*pp++) & 0xff); } for (lp = row_buf + 1, cp = prev_row + 1; i < row_bytes; i++) { int a, b, c, pa, pb, pc, p; b = *pp++; c = *cp++; a = *lp++; p = b - c; pc = a - c; #ifdef PNG_USE_ABS pa = abs(p); pb = abs(pc); pc = abs(p + pc); #else pa = p < 0 ? -p : p; pb = pc < 0 ? -pc : pc; pc = (p + pc) < 0 ? -(p + pc) : p + pc; #endif p = (pa <= pb && pa <=pc) ? a : (pb <= pc) ? b : c; *dp++ = (png_byte)(((int)*rp++ - p) & 0xff); } best_row = png_ptr->paeth_row; } else if (filter_to_do & PNG_FILTER_PAETH) { png_bytep rp, dp, pp, cp, lp; png_uint_32 sum = 0, lmins = mins; png_uint_32 i; int v; for (i = 0, rp = row_buf + 1, dp = png_ptr->paeth_row + 1, pp = prev_row + 1; i < bpp; i++) { v = *dp++ = (png_byte)(((int)*rp++ - (int)*pp++) & 0xff); sum += (v < 128) ? v : 256 - v; } for (lp = row_buf + 1, cp = prev_row + 1; i < row_bytes; i++) { int a, b, c, pa, pb, pc, p; b = *pp++; c = *cp++; a = *lp++; #ifndef PNG_SLOW_PAETH p = b - c; pc = a - c; #ifdef PNG_USE_ABS pa = abs(p); pb = abs(pc); pc = abs(p + pc); #else pa = p < 0 ? -p : p; pb = pc < 0 ? -pc : pc; pc = (p + pc) < 0 ? -(p + pc) : p + pc; #endif p = (pa <= pb && pa <=pc) ? a : (pb <= pc) ? b : c; #else /* PNG_SLOW_PAETH */ p = a + b - c; pa = abs(p - a); pb = abs(p - b); pc = abs(p - c); if (pa <= pb && pa <= pc) p = a; else if (pb <= pc) p = b; else p = c; #endif /* PNG_SLOW_PAETH */ v = *dp++ = (png_byte)(((int)*rp++ - p) & 0xff); sum += (v < 128) ? v : 256 - v; if (sum > lmins) /* We are already worse, don't continue. */ break; } if (sum < mins) { best_row = png_ptr->paeth_row; } } #endif /* PNG_WRITE_FILTER_SUPPORTED */ /* Do the actual writing of the filtered row data from the chosen filter. */ png_write_filtered_row(png_ptr, best_row); }
172,194
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool GesturePoint::IsInSecondClickTimeWindow() const { double duration = last_touch_time_ - last_tap_time_; return duration < kMaximumSecondsBetweenDoubleClick; } Commit Message: Add setters for the aura gesture recognizer constants. BUG=113227 TEST=none Review URL: http://codereview.chromium.org/9372040 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@122586 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-20
bool GesturePoint::IsInSecondClickTimeWindow() const { double duration = last_touch_time_ - last_tap_time_; return duration < GestureConfiguration::max_seconds_between_double_click(); }
171,043
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp, struct rpcrdma_write_array *wr_ary, struct rpcrdma_write_array *rp_ary) { struct rpcrdma_read_chunk *rd_ary; struct rpcrdma_segment *arg_ch; rd_ary = (struct rpcrdma_read_chunk *)&rdma_argp->rm_body.rm_chunks[0]; if (rd_ary->rc_discrim != xdr_zero) return be32_to_cpu(rd_ary->rc_target.rs_handle); if (wr_ary && be32_to_cpu(wr_ary->wc_nchunks)) { arg_ch = &wr_ary->wc_array[0].wc_target; return be32_to_cpu(arg_ch->rs_handle); } if (rp_ary && be32_to_cpu(rp_ary->wc_nchunks)) { arg_ch = &rp_ary->wc_array[0].wc_target; return be32_to_cpu(arg_ch->rs_handle); } return 0; } Commit Message: Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux Pull nfsd updates from Bruce Fields: "Another RDMA update from Chuck Lever, and a bunch of miscellaneous bugfixes" * tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits) nfsd: Fix up the "supattr_exclcreat" attributes nfsd: encoders mustn't use unitialized values in error cases nfsd: fix undefined behavior in nfsd4_layout_verify lockd: fix lockd shutdown race NFSv4: Fix callback server shutdown SUNRPC: Refactor svc_set_num_threads() NFSv4.x/callback: Create the callback service through svc_create_pooled lockd: remove redundant check on block svcrdma: Clean out old XDR encoders svcrdma: Remove the req_map cache svcrdma: Remove unused RDMA Write completion handler svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt svcrdma: Clean up RPC-over-RDMA backchannel reply processing svcrdma: Report Write/Reply chunk overruns svcrdma: Clean up RDMA_ERROR path svcrdma: Use rdma_rw API in RPC reply path svcrdma: Introduce local rdma_rw API helpers svcrdma: Clean up svc_rdma_get_inv_rkey() svcrdma: Add helper to save pages under I/O svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT ... CWE ID: CWE-404
static u32 svc_rdma_get_inv_rkey(__be32 *rdma_argp, __be32 *wr_lst, __be32 *rp_ch) { __be32 *p; p = rdma_argp + rpcrdma_fixed_maxsz; if (*p != xdr_zero) p += 2; else if (wr_lst && be32_to_cpup(wr_lst + 1)) p = wr_lst + 2; else if (rp_ch && be32_to_cpup(rp_ch + 1)) p = rp_ch + 2; else return 0; return be32_to_cpup(p); } /* ib_dma_map_page() is used here because svc_rdma_dma_unmap() * is used during completion to DMA-unmap this memory, and * it uses ib_dma_unmap_page() exclusively. */ static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma, struct svc_rdma_op_ctxt *ctxt, unsigned int sge_no, unsigned char *base, unsigned int len) { unsigned long offset = (unsigned long)base & ~PAGE_MASK; struct ib_device *dev = rdma->sc_cm_id->device; dma_addr_t dma_addr; dma_addr = ib_dma_map_page(dev, virt_to_page(base), offset, len, DMA_TO_DEVICE); if (ib_dma_mapping_error(dev, dma_addr)) return -EIO; ctxt->sge[sge_no].addr = dma_addr; ctxt->sge[sge_no].length = len; ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey; svc_rdma_count_mappings(rdma, ctxt); return 0; }
168,171
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void copy_xauthority(void) { char *src = RUN_XAUTHORITY_FILE ; char *dest; if (asprintf(&dest, "%s/.Xauthority", cfg.homedir) == -1) errExit("asprintf"); if (is_link(dest)) { fprintf(stderr, "Error: %s is a symbolic link\n", dest); exit(1); } pid_t child = fork(); if (child < 0) errExit("fork"); if (child == 0) { drop_privs(0); int rv = copy_file(src, dest); if (rv) fprintf(stderr, "Warning: cannot transfer .Xauthority in private home directory\n"); else { fs_logger2("clone", dest); } _exit(0); } waitpid(child, NULL, 0); if (chown(dest, getuid(), getgid()) < 0) errExit("chown"); if (chmod(dest, S_IRUSR | S_IWUSR) < 0) errExit("chmod"); unlink(src); } Commit Message: security fix CWE ID: CWE-269
static void copy_xauthority(void) { char *src = RUN_XAUTHORITY_FILE ; char *dest; if (asprintf(&dest, "%s/.Xauthority", cfg.homedir) == -1) errExit("asprintf"); if (is_link(dest)) { fprintf(stderr, "Error: %s is a symbolic link\n", dest); exit(1); } copy_file_as_user(src, dest, getuid(), getgid(), S_IRUSR | S_IWUSR); // regular user fs_logger2("clone", dest); unlink(src); }
170,097
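The firejail fix above replaces the inline fork/drop_privs/copy_file sequence with a copy_file_as_user() helper. The compile-only sketch below outlines what such a helper typically does under assumed POSIX semantics; it is not the firejail implementation, and every name in it is hypothetical.

/* Sketch of a copy-as-user helper: fork, drop to the target uid/gid in
 * the child before touching the user-controlled paths, copy, and report
 * the child's result to the caller. */
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>

static int copy_bytes(const char *src, const char *dst, mode_t mode) {
    int in = open(src, O_RDONLY);
    if (in < 0) return -1;
    int out = open(dst, O_WRONLY | O_CREAT | O_TRUNC, mode);
    if (out < 0) { close(in); return -1; }
    char buf[4096];
    ssize_t n;
    while ((n = read(in, buf, sizeof buf)) > 0)
        if (write(out, buf, (size_t)n) != n) { n = -1; break; }
    close(in);
    close(out);
    return n < 0 ? -1 : 0;
}

int copy_file_as_user_sketch(const char *src, const char *dst,
                             uid_t uid, gid_t gid, mode_t mode) {
    pid_t child = fork();
    if (child < 0) return -1;
    if (child == 0) {
        /* Drop privileges first: the copy runs as the unprivileged user. */
        if (setgid(gid) != 0 || setuid(uid) != 0) _exit(1);
        _exit(copy_bytes(src, dst, mode) == 0 ? 0 : 1);
    }
    int status = 0;
    waitpid(child, &status, 0);
    return (WIFEXITED(status) && WEXITSTATUS(status) == 0) ? 0 : -1;
}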
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: flac_read_loop (SF_PRIVATE *psf, unsigned len) { FLAC_PRIVATE* pflac = (FLAC_PRIVATE*) psf->codec_data ; pflac->pos = 0 ; pflac->len = len ; pflac->remain = len ; /* First copy data that has already been decoded and buffered. */ if (pflac->frame != NULL && pflac->bufferpos < pflac->frame->header.blocksize) flac_buffer_copy (psf) ; /* Decode some more. */ while (pflac->pos < pflac->len) { if (FLAC__stream_decoder_process_single (pflac->fsd) == 0) break ; if (FLAC__stream_decoder_get_state (pflac->fsd) >= FLAC__STREAM_DECODER_END_OF_STREAM) break ; } ; pflac->ptr = NULL ; return pflac->pos ; } /* flac_read_loop */ Commit Message: src/flac.c: Improve error handling Especially when dealing with corrupt or malicious files. CWE ID: CWE-119
flac_read_loop (SF_PRIVATE *psf, unsigned len) { FLAC_PRIVATE* pflac = (FLAC_PRIVATE*) psf->codec_data ; FLAC__StreamDecoderState state ; pflac->pos = 0 ; pflac->len = len ; pflac->remain = len ; state = FLAC__stream_decoder_get_state (pflac->fsd) ; if (state > FLAC__STREAM_DECODER_END_OF_STREAM) { psf_log_printf (psf, "FLAC__stream_decoder_get_state returned %s\n", FLAC__StreamDecoderStateString [state]) ; /* Current frame is busted, so NULL the pointer. */ pflac->frame = NULL ; } ; /* First copy data that has already been decoded and buffered. */ if (pflac->frame != NULL && pflac->bufferpos < pflac->frame->header.blocksize) flac_buffer_copy (psf) ; /* Decode some more. */ while (pflac->pos < pflac->len) { if (FLAC__stream_decoder_process_single (pflac->fsd) == 0) break ; state = FLAC__stream_decoder_get_state (pflac->fsd) ; if (state >= FLAC__STREAM_DECODER_END_OF_STREAM) { psf_log_printf (psf, "FLAC__stream_decoder_get_state returned %s\n", FLAC__StreamDecoderStateString [state]) ; /* Current frame is busted, so NULL the pointer. */ pflac->frame = NULL ; break ; } ; } ; pflac->ptr = NULL ; return pflac->pos ; } /* flac_read_loop */
168,255
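The libsndfile fix above consults FLAC__stream_decoder_get_state() and drops the cached frame pointer as soon as the stream is known to be corrupt, so later buffer copies cannot read through stale state. A minimal sketch of that defensive pattern follows; the types and names are illustrative, not the real FLAC API.

/* Check the decoder's state before reusing a cached frame, and clear
 * the cached pointer once the stream is known to be broken. */
#include <stddef.h>

enum dec_state { DEC_OK = 0, DEC_END_OF_STREAM = 1, DEC_ERROR = 2 };

struct cached_frame { unsigned blocksize; };

struct reader {
    enum dec_state state;
    struct cached_frame *frame;   /* previously decoded, may be stale */
    unsigned bufferpos;
};

static int may_reuse_cached_frame(struct reader *r) {
    if (r->state > DEC_END_OF_STREAM) {
        r->frame = NULL;          /* current frame is busted: drop it */
        return 0;
    }
    return r->frame != NULL && r->bufferpos < r->frame->blocksize;
}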
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: WORD32 ihevcd_ref_list(codec_t *ps_codec, pps_t *ps_pps, sps_t *ps_sps, slice_header_t *ps_slice_hdr) { WORD32 i; WORD32 st_rps_idx; WORD32 num_neg_pics, num_pos_pics; WORD8 *pi1_used; WORD16 *pi2_delta_poc; UWORD32 u4_max_poc_lsb; pic_buf_t *ps_pic_buf; mv_buf_t *ps_mv_buf; UWORD32 r_idx; dpb_mgr_t *ps_dpb_mgr = (dpb_mgr_t *)ps_codec->pv_dpb_mgr; buf_mgr_t *ps_mv_buf_mgr = (buf_mgr_t *)ps_codec->pv_mv_buf_mgr; WORD32 ai4_poc_st_curr_before[MAX_DPB_SIZE], ai4_poc_st_foll[MAX_DPB_SIZE], ai4_poc_st_curr_after[MAX_DPB_SIZE]; WORD32 ai4_poc_lt_curr[MAX_DPB_SIZE], ai4_poc_lt_foll[MAX_DPB_SIZE]; UWORD32 u4_num_st_curr_before, u4_num_st_foll, u4_num_st_curr_after, u4_num_lt_curr, u4_num_lt_foll; UWORD32 u4_num_total_curr; WORD8 ai1_curr_delta_poc_msb_present_flag[MAX_DPB_SIZE], ai1_foll_delta_poc_msb_present_flag[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_lt_curr[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_lt_foll[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_st_curr_after[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_st_curr_before[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_st_foll[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_list_temp0[MAX_DPB_SIZE], *as_ref_pic_list_temp1[MAX_DPB_SIZE]; UWORD32 u4_num_rps_curr_temp_list0, u4_num_rps_curr_temp_list1; WORD32 i4_pic_order_cnt_val; WORD32 i4_poc_lt; UNUSED(as_ref_pic_lt_foll); UNUSED(as_ref_pic_st_foll); UNUSED(ps_pps); RETURN_IF_NAL_INFO; u4_max_poc_lsb = (1 << ps_sps->i1_log2_max_pic_order_cnt_lsb); i4_pic_order_cnt_val = ps_slice_hdr->i4_abs_pic_order_cnt; if(1 == ps_slice_hdr->i1_short_term_ref_pic_set_sps_flag) { st_rps_idx = ps_slice_hdr->i1_short_term_ref_pic_set_idx; num_neg_pics = ps_sps->as_stref_picset[st_rps_idx].i1_num_neg_pics; num_pos_pics = ps_sps->as_stref_picset[st_rps_idx].i1_num_pos_pics; pi1_used = ps_sps->as_stref_picset[st_rps_idx].ai1_used; pi2_delta_poc = ps_sps->as_stref_picset[st_rps_idx].ai2_delta_poc; } else { st_rps_idx = ps_sps->i1_num_short_term_ref_pic_sets; num_neg_pics = ps_slice_hdr->s_stref_picset.i1_num_neg_pics; num_pos_pics = ps_slice_hdr->s_stref_picset.i1_num_pos_pics; pi1_used = ps_slice_hdr->s_stref_picset.ai1_used; pi2_delta_poc = ps_slice_hdr->s_stref_picset.ai2_delta_poc; } u4_num_st_curr_before = 0; u4_num_st_foll = 0; for(i = 0; i < num_neg_pics; i++) { if(pi1_used[i]) { ai4_poc_st_curr_before[u4_num_st_curr_before] = i4_pic_order_cnt_val + pi2_delta_poc[i]; u4_num_st_curr_before++; } else { ai4_poc_st_foll[u4_num_st_foll] = i4_pic_order_cnt_val + pi2_delta_poc[i]; u4_num_st_foll++; } } u4_num_st_curr_after = 0; for(i = num_neg_pics; i < num_neg_pics + num_pos_pics; i++) { if(pi1_used[i]) { ai4_poc_st_curr_after[u4_num_st_curr_after] = i4_pic_order_cnt_val + pi2_delta_poc[i]; u4_num_st_curr_after++; } else { ai4_poc_st_foll[u4_num_st_foll] = i4_pic_order_cnt_val + pi2_delta_poc[i]; u4_num_st_foll++; } } u4_num_lt_curr = 0; u4_num_lt_foll = 0; for(i = 0; i < ps_slice_hdr->i1_num_long_term_sps + ps_slice_hdr->i1_num_long_term_pics; i++) { i4_poc_lt = ps_slice_hdr->ai4_poc_lsb_lt[i]; if(ps_slice_hdr->ai1_delta_poc_msb_present_flag[i]) { i4_poc_lt += i4_pic_order_cnt_val - ps_slice_hdr->ai1_delta_poc_msb_cycle_lt[i] * u4_max_poc_lsb - ps_slice_hdr->i4_pic_order_cnt_lsb; } if(ps_slice_hdr->ai1_used_by_curr_pic_lt_flag[i]) { ai4_poc_lt_curr[u4_num_lt_curr] = i4_poc_lt; ai1_curr_delta_poc_msb_present_flag[u4_num_lt_curr] = ps_slice_hdr->ai1_delta_poc_msb_present_flag[i]; u4_num_lt_curr++; } else { ai4_poc_lt_foll[u4_num_lt_foll] = i4_poc_lt; ai1_foll_delta_poc_msb_present_flag[u4_num_lt_foll] = ps_slice_hdr->ai1_delta_poc_msb_present_flag[i]; 
u4_num_lt_foll++; } } u4_num_total_curr = u4_num_lt_curr + u4_num_st_curr_after + u4_num_st_curr_before; /* Bit stream conformance tests */ /* for(i = 0; i < u4_num_lt_curr; i++) { int j; if(ai1_curr_delta_poc_msb_present_flag[i]) { for(j = 0; j < u4_num_st_curr_before; j++) { ASSERT(ai4_poc_st_curr_before[j] != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_st_curr_after; j++) { ASSERT(ai4_poc_st_curr_after[j] != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_st_foll; j++) { ASSERT(ai4_poc_st_foll[j] != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_lt_curr; j++) { ASSERT((ai4_poc_lt_curr[j] != ai4_poc_lt_curr[i]) || (j == i)); } } else { for(j = 0; j < u4_num_st_curr_before; j++) { ASSERT((ai4_poc_st_curr_before[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_st_curr_after; j++) { ASSERT((ai4_poc_st_curr_after[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_st_foll; j++) { ASSERT((ai4_poc_st_foll[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_lt_curr; j++) { ASSERT(((ai4_poc_lt_curr[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_curr[i]) || (j == i)); } } } for(i = 0; i < u4_num_lt_foll; i++) { int j; if(ai1_foll_delta_poc_msb_present_flag[i]) { for(j = 0; j < u4_num_st_curr_before; j++) { ASSERT(ai4_poc_st_curr_before[j] != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_st_curr_after; j++) { ASSERT(ai4_poc_st_curr_after[j] != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_st_foll; j++) { ASSERT(ai4_poc_st_foll[j] != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_lt_curr; j++) { ASSERT(ai4_poc_lt_curr[j] != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_lt_foll; j++) { ASSERT((ai4_poc_lt_foll[j] != ai4_poc_lt_foll[i]) || (j == i)); } } else { for(j = 0; j < u4_num_st_curr_before; j++) { ASSERT((ai4_poc_st_curr_before[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_st_curr_after; j++) { ASSERT((ai4_poc_st_curr_after[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_st_foll; j++) { ASSERT((ai4_poc_st_foll[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_lt_curr; j++) { ASSERT((ai4_poc_lt_curr[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_lt_foll; j++) { ASSERT(((ai4_poc_lt_foll[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_foll[i]) || (j == i)); } } } */ /* Reference Pic sets creation */ /* Set all the DPB buffers to UNUSED_FOR_REF */ if(0 == ps_codec->i4_pic_present) { for(i = 0; i < MAX_DPB_BUFS; i++) { if(ps_dpb_mgr->as_dpb_info[i].ps_pic_buf) ps_dpb_mgr->as_dpb_info[i].ps_pic_buf->u1_used_as_ref = UNUSED_FOR_REF; } } for(i = 0; i < (WORD32)u4_num_lt_curr; i++) { if(0 == ai1_curr_delta_poc_msb_present_flag[i]) { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc_lsb(ps_dpb_mgr, ai4_poc_lt_curr[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = LONG_TERM_REF; as_ref_pic_lt_curr[i] = ps_pic_buf; } else { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc(ps_dpb_mgr, ai4_poc_lt_curr[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = LONG_TERM_REF; as_ref_pic_lt_curr[i] = ps_pic_buf; } } for(i = 0; i < (WORD32)u4_num_lt_foll; i++) { if(0 == ai1_foll_delta_poc_msb_present_flag[i]) { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc_lsb(ps_dpb_mgr, ai4_poc_lt_foll[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = LONG_TERM_REF; as_ref_pic_lt_foll[i] = ps_pic_buf; } else { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc(ps_dpb_mgr, ai4_poc_lt_foll[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = LONG_TERM_REF; as_ref_pic_lt_foll[i] = 
ps_pic_buf; } } for(i = 0; i < (WORD32)u4_num_st_curr_before; i++) { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc(ps_dpb_mgr, ai4_poc_st_curr_before[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = SHORT_TERM_REF; as_ref_pic_st_curr_before[i] = ps_pic_buf; } for(i = 0; i < (WORD32)u4_num_st_curr_after; i++) { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc(ps_dpb_mgr, ai4_poc_st_curr_after[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = SHORT_TERM_REF; as_ref_pic_st_curr_after[i] = ps_pic_buf; } for(i = 0; i < (WORD32)u4_num_st_foll; i++) { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc(ps_dpb_mgr, ai4_poc_st_foll[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = SHORT_TERM_REF; as_ref_pic_st_foll[i] = ps_pic_buf; } u4_num_rps_curr_temp_list0 = (WORD32)u4_num_total_curr > ps_slice_hdr->i1_num_ref_idx_l0_active ? (WORD32)u4_num_total_curr : ps_slice_hdr->i1_num_ref_idx_l0_active; r_idx = 0; if((PSLICE == ps_slice_hdr->i1_slice_type) || (BSLICE == ps_slice_hdr->i1_slice_type)) { while(r_idx < u4_num_rps_curr_temp_list0) { for(i = 0; (i < (WORD32)u4_num_st_curr_before) && (r_idx < u4_num_rps_curr_temp_list0); r_idx++, i++) { if(NULL == as_ref_pic_st_curr_before[i]) { as_ref_pic_st_curr_before[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_st_curr_before[i]); } as_ref_pic_list_temp0[r_idx] = as_ref_pic_st_curr_before[i]; } for(i = 0; (i < (WORD32)u4_num_st_curr_after) && (r_idx < u4_num_rps_curr_temp_list0); r_idx++, i++) { if(NULL == as_ref_pic_st_curr_after[i]) { as_ref_pic_st_curr_after[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_st_curr_after[i]); } as_ref_pic_list_temp0[r_idx] = as_ref_pic_st_curr_after[i]; } for(i = 0; (i < (WORD32)u4_num_lt_curr) && (r_idx < u4_num_rps_curr_temp_list0); r_idx++, i++) { if(NULL == as_ref_pic_lt_curr[i]) { as_ref_pic_lt_curr[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_lt_curr[i]); } as_ref_pic_list_temp0[r_idx] = as_ref_pic_lt_curr[i]; } } for(r_idx = 0; (WORD32)r_idx < ps_slice_hdr->i1_num_ref_idx_l0_active; r_idx++) { pic_buf_t *ps_pic_buf; ps_slice_hdr->as_ref_pic_list0[r_idx].pv_pic_buf = ps_slice_hdr->s_rplm.i1_ref_pic_list_modification_flag_l0 ? (void *)as_ref_pic_list_temp0[ps_slice_hdr->s_rplm.i1_list_entry_l0[r_idx]] : (void *)as_ref_pic_list_temp0[r_idx]; ps_pic_buf = (pic_buf_t *)ps_slice_hdr->as_ref_pic_list0[r_idx].pv_pic_buf; if(ps_pic_buf == NULL) return IHEVCD_REF_PIC_NOT_FOUND; ps_mv_buf = ihevcd_mv_mgr_get_poc(ps_mv_buf_mgr, ps_pic_buf->i4_abs_poc); ps_slice_hdr->as_ref_pic_list0[r_idx].pv_mv_buf = ps_mv_buf; } if(ps_slice_hdr->i1_slice_type == BSLICE) { u4_num_rps_curr_temp_list1 = (WORD32)u4_num_total_curr > ps_slice_hdr->i1_num_ref_idx_l1_active ? 
(WORD32)u4_num_total_curr : ps_slice_hdr->i1_num_ref_idx_l1_active; r_idx = 0; while(r_idx < u4_num_rps_curr_temp_list1) { for(i = 0; (i < (WORD32)u4_num_st_curr_after) && (r_idx < u4_num_rps_curr_temp_list1); r_idx++, i++) { if(NULL == as_ref_pic_st_curr_after[i]) { as_ref_pic_st_curr_after[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_st_curr_after[i]); } as_ref_pic_list_temp1[r_idx] = as_ref_pic_st_curr_after[i]; } for(i = 0; (i < (WORD32)u4_num_st_curr_before) && (r_idx < u4_num_rps_curr_temp_list1); r_idx++, i++) { if(NULL == as_ref_pic_st_curr_before[i]) { as_ref_pic_st_curr_before[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_st_curr_before[i]); } as_ref_pic_list_temp1[r_idx] = as_ref_pic_st_curr_before[i]; } for(i = 0; (i < (WORD32)u4_num_lt_curr) && (r_idx < u4_num_rps_curr_temp_list1); r_idx++, i++) { if(NULL == as_ref_pic_lt_curr[i]) { as_ref_pic_lt_curr[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_lt_curr[i]); } as_ref_pic_list_temp1[r_idx] = as_ref_pic_lt_curr[i]; } } for(r_idx = 0; (WORD32)r_idx < ps_slice_hdr->i1_num_ref_idx_l1_active; r_idx++) { pic_buf_t *ps_pic_buf; ps_slice_hdr->as_ref_pic_list1[r_idx].pv_pic_buf = ps_slice_hdr->s_rplm.i1_ref_pic_list_modification_flag_l1 ? (void *)as_ref_pic_list_temp1[ps_slice_hdr->s_rplm.i1_list_entry_l1[r_idx]] : (void *)as_ref_pic_list_temp1[r_idx]; ps_pic_buf = (pic_buf_t *)ps_slice_hdr->as_ref_pic_list1[r_idx].pv_pic_buf; if(ps_pic_buf == NULL) return IHEVCD_REF_PIC_NOT_FOUND; ps_mv_buf = ihevcd_mv_mgr_get_poc(ps_mv_buf_mgr, ps_pic_buf->i4_abs_poc); ps_slice_hdr->as_ref_pic_list1[r_idx].pv_mv_buf = ps_mv_buf; } } } DEBUG_PRINT_REF_LIST_POCS(i4_pic_order_cnt_val, ps_slice_hdr, ps_dpb_mgr, u4_num_st_curr_before, u4_num_st_curr_after, u4_num_st_foll, u4_num_lt_curr, u4_num_lt_foll, ai4_poc_st_curr_before, ai4_poc_st_curr_after, ai4_poc_st_foll, ai4_poc_lt_curr, ai4_poc_lt_foll); /* Buffers that are still marked as UNUSED_FOR_REF are released from dpb (internally dpb calls release from pic buf manager)*/ for(i = 0; i < MAX_DPB_BUFS; i++) { if((ps_dpb_mgr->as_dpb_info[i].ps_pic_buf) && (UNUSED_FOR_REF == ps_dpb_mgr->as_dpb_info[i].ps_pic_buf->u1_used_as_ref)) { pic_buf_t *ps_pic_buf = ps_dpb_mgr->as_dpb_info[i].ps_pic_buf; mv_buf_t *ps_mv_buf; /* Long term index is set to MAX_DPB_BUFS to ensure it is not added as LT */ ihevc_dpb_mgr_del_ref(ps_dpb_mgr, (buf_mgr_t *)ps_codec->pv_pic_buf_mgr, ps_pic_buf->i4_abs_poc); /* Find buffer id of the MV bank corresponding to the buffer being freed (Buffer with POC of u4_abs_poc) */ ps_mv_buf = (mv_buf_t *)ps_codec->ps_mv_buf; for(i = 0; i < BUF_MGR_MAX_CNT; i++) { if(ps_mv_buf && ps_mv_buf->i4_abs_poc == ps_pic_buf->i4_abs_poc) { ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_mv_buf_mgr, i, BUF_MGR_REF); break; } ps_mv_buf++; } } } return IHEVCD_SUCCESS; } Commit Message: Check only allocated mv bufs for releasing from reference When checking mv bufs for releasing from reference, unallocated mv bufs were also checked. This issue was fixed by restricting the loop count to allocated number of mv bufs. Bug: 34896906 Bug: 34819017 Change-Id: If832f590b301f414d4cd5206414efc61a70c17cb (cherry picked from commit 23bfe3e06d53ea749073a5d7ceda84239742b2c2) CWE ID:
WORD32 ihevcd_ref_list(codec_t *ps_codec, pps_t *ps_pps, sps_t *ps_sps, slice_header_t *ps_slice_hdr) { WORD32 i, j; WORD32 st_rps_idx; WORD32 num_neg_pics, num_pos_pics; WORD8 *pi1_used; WORD16 *pi2_delta_poc; UWORD32 u4_max_poc_lsb; pic_buf_t *ps_pic_buf; mv_buf_t *ps_mv_buf; UWORD32 r_idx; dpb_mgr_t *ps_dpb_mgr = (dpb_mgr_t *)ps_codec->pv_dpb_mgr; buf_mgr_t *ps_mv_buf_mgr = (buf_mgr_t *)ps_codec->pv_mv_buf_mgr; WORD32 ai4_poc_st_curr_before[MAX_DPB_SIZE], ai4_poc_st_foll[MAX_DPB_SIZE], ai4_poc_st_curr_after[MAX_DPB_SIZE]; WORD32 ai4_poc_lt_curr[MAX_DPB_SIZE], ai4_poc_lt_foll[MAX_DPB_SIZE]; UWORD32 u4_num_st_curr_before, u4_num_st_foll, u4_num_st_curr_after, u4_num_lt_curr, u4_num_lt_foll; UWORD32 u4_num_total_curr; WORD8 ai1_curr_delta_poc_msb_present_flag[MAX_DPB_SIZE], ai1_foll_delta_poc_msb_present_flag[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_lt_curr[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_lt_foll[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_st_curr_after[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_st_curr_before[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_st_foll[MAX_DPB_SIZE]; pic_buf_t *as_ref_pic_list_temp0[MAX_DPB_SIZE], *as_ref_pic_list_temp1[MAX_DPB_SIZE]; UWORD32 u4_num_rps_curr_temp_list0, u4_num_rps_curr_temp_list1; WORD32 i4_pic_order_cnt_val; WORD32 i4_poc_lt; UNUSED(as_ref_pic_lt_foll); UNUSED(as_ref_pic_st_foll); UNUSED(ps_pps); RETURN_IF_NAL_INFO; u4_max_poc_lsb = (1 << ps_sps->i1_log2_max_pic_order_cnt_lsb); i4_pic_order_cnt_val = ps_slice_hdr->i4_abs_pic_order_cnt; if(1 == ps_slice_hdr->i1_short_term_ref_pic_set_sps_flag) { st_rps_idx = ps_slice_hdr->i1_short_term_ref_pic_set_idx; num_neg_pics = ps_sps->as_stref_picset[st_rps_idx].i1_num_neg_pics; num_pos_pics = ps_sps->as_stref_picset[st_rps_idx].i1_num_pos_pics; pi1_used = ps_sps->as_stref_picset[st_rps_idx].ai1_used; pi2_delta_poc = ps_sps->as_stref_picset[st_rps_idx].ai2_delta_poc; } else { st_rps_idx = ps_sps->i1_num_short_term_ref_pic_sets; num_neg_pics = ps_slice_hdr->s_stref_picset.i1_num_neg_pics; num_pos_pics = ps_slice_hdr->s_stref_picset.i1_num_pos_pics; pi1_used = ps_slice_hdr->s_stref_picset.ai1_used; pi2_delta_poc = ps_slice_hdr->s_stref_picset.ai2_delta_poc; } u4_num_st_curr_before = 0; u4_num_st_foll = 0; for(i = 0; i < num_neg_pics; i++) { if(pi1_used[i]) { ai4_poc_st_curr_before[u4_num_st_curr_before] = i4_pic_order_cnt_val + pi2_delta_poc[i]; u4_num_st_curr_before++; } else { ai4_poc_st_foll[u4_num_st_foll] = i4_pic_order_cnt_val + pi2_delta_poc[i]; u4_num_st_foll++; } } u4_num_st_curr_after = 0; for(i = num_neg_pics; i < num_neg_pics + num_pos_pics; i++) { if(pi1_used[i]) { ai4_poc_st_curr_after[u4_num_st_curr_after] = i4_pic_order_cnt_val + pi2_delta_poc[i]; u4_num_st_curr_after++; } else { ai4_poc_st_foll[u4_num_st_foll] = i4_pic_order_cnt_val + pi2_delta_poc[i]; u4_num_st_foll++; } } u4_num_lt_curr = 0; u4_num_lt_foll = 0; for(i = 0; i < ps_slice_hdr->i1_num_long_term_sps + ps_slice_hdr->i1_num_long_term_pics; i++) { i4_poc_lt = ps_slice_hdr->ai4_poc_lsb_lt[i]; if(ps_slice_hdr->ai1_delta_poc_msb_present_flag[i]) { i4_poc_lt += i4_pic_order_cnt_val - ps_slice_hdr->ai1_delta_poc_msb_cycle_lt[i] * u4_max_poc_lsb - ps_slice_hdr->i4_pic_order_cnt_lsb; } if(ps_slice_hdr->ai1_used_by_curr_pic_lt_flag[i]) { ai4_poc_lt_curr[u4_num_lt_curr] = i4_poc_lt; ai1_curr_delta_poc_msb_present_flag[u4_num_lt_curr] = ps_slice_hdr->ai1_delta_poc_msb_present_flag[i]; u4_num_lt_curr++; } else { ai4_poc_lt_foll[u4_num_lt_foll] = i4_poc_lt; ai1_foll_delta_poc_msb_present_flag[u4_num_lt_foll] = ps_slice_hdr->ai1_delta_poc_msb_present_flag[i]; 
u4_num_lt_foll++; } } u4_num_total_curr = u4_num_lt_curr + u4_num_st_curr_after + u4_num_st_curr_before; /* Bit stream conformance tests */ /* for(i = 0; i < u4_num_lt_curr; i++) { int j; if(ai1_curr_delta_poc_msb_present_flag[i]) { for(j = 0; j < u4_num_st_curr_before; j++) { ASSERT(ai4_poc_st_curr_before[j] != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_st_curr_after; j++) { ASSERT(ai4_poc_st_curr_after[j] != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_st_foll; j++) { ASSERT(ai4_poc_st_foll[j] != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_lt_curr; j++) { ASSERT((ai4_poc_lt_curr[j] != ai4_poc_lt_curr[i]) || (j == i)); } } else { for(j = 0; j < u4_num_st_curr_before; j++) { ASSERT((ai4_poc_st_curr_before[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_st_curr_after; j++) { ASSERT((ai4_poc_st_curr_after[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_st_foll; j++) { ASSERT((ai4_poc_st_foll[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_curr[i]); } for(j = 0; j < u4_num_lt_curr; j++) { ASSERT(((ai4_poc_lt_curr[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_curr[i]) || (j == i)); } } } for(i = 0; i < u4_num_lt_foll; i++) { int j; if(ai1_foll_delta_poc_msb_present_flag[i]) { for(j = 0; j < u4_num_st_curr_before; j++) { ASSERT(ai4_poc_st_curr_before[j] != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_st_curr_after; j++) { ASSERT(ai4_poc_st_curr_after[j] != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_st_foll; j++) { ASSERT(ai4_poc_st_foll[j] != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_lt_curr; j++) { ASSERT(ai4_poc_lt_curr[j] != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_lt_foll; j++) { ASSERT((ai4_poc_lt_foll[j] != ai4_poc_lt_foll[i]) || (j == i)); } } else { for(j = 0; j < u4_num_st_curr_before; j++) { ASSERT((ai4_poc_st_curr_before[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_st_curr_after; j++) { ASSERT((ai4_poc_st_curr_after[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_st_foll; j++) { ASSERT((ai4_poc_st_foll[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_lt_curr; j++) { ASSERT((ai4_poc_lt_curr[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_foll[i]); } for(j = 0; j < u4_num_lt_foll; j++) { ASSERT(((ai4_poc_lt_foll[j] & (u4_max_poc_lsb - 1)) != ai4_poc_lt_foll[i]) || (j == i)); } } } */ /* Reference Pic sets creation */ /* Set all the DPB buffers to UNUSED_FOR_REF */ if(0 == ps_codec->i4_pic_present) { for(i = 0; i < MAX_DPB_BUFS; i++) { if(ps_dpb_mgr->as_dpb_info[i].ps_pic_buf) ps_dpb_mgr->as_dpb_info[i].ps_pic_buf->u1_used_as_ref = UNUSED_FOR_REF; } } for(i = 0; i < (WORD32)u4_num_lt_curr; i++) { if(0 == ai1_curr_delta_poc_msb_present_flag[i]) { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc_lsb(ps_dpb_mgr, ai4_poc_lt_curr[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = LONG_TERM_REF; as_ref_pic_lt_curr[i] = ps_pic_buf; } else { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc(ps_dpb_mgr, ai4_poc_lt_curr[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = LONG_TERM_REF; as_ref_pic_lt_curr[i] = ps_pic_buf; } } for(i = 0; i < (WORD32)u4_num_lt_foll; i++) { if(0 == ai1_foll_delta_poc_msb_present_flag[i]) { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc_lsb(ps_dpb_mgr, ai4_poc_lt_foll[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = LONG_TERM_REF; as_ref_pic_lt_foll[i] = ps_pic_buf; } else { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc(ps_dpb_mgr, ai4_poc_lt_foll[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = LONG_TERM_REF; as_ref_pic_lt_foll[i] = 
ps_pic_buf; } } for(i = 0; i < (WORD32)u4_num_st_curr_before; i++) { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc(ps_dpb_mgr, ai4_poc_st_curr_before[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = SHORT_TERM_REF; as_ref_pic_st_curr_before[i] = ps_pic_buf; } for(i = 0; i < (WORD32)u4_num_st_curr_after; i++) { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc(ps_dpb_mgr, ai4_poc_st_curr_after[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = SHORT_TERM_REF; as_ref_pic_st_curr_after[i] = ps_pic_buf; } for(i = 0; i < (WORD32)u4_num_st_foll; i++) { ps_pic_buf = ihevc_dpb_mgr_get_ref_by_poc(ps_dpb_mgr, ai4_poc_st_foll[i]); if(NULL != ps_pic_buf) ps_pic_buf->u1_used_as_ref = SHORT_TERM_REF; as_ref_pic_st_foll[i] = ps_pic_buf; } u4_num_rps_curr_temp_list0 = (WORD32)u4_num_total_curr > ps_slice_hdr->i1_num_ref_idx_l0_active ? (WORD32)u4_num_total_curr : ps_slice_hdr->i1_num_ref_idx_l0_active; r_idx = 0; if((PSLICE == ps_slice_hdr->i1_slice_type) || (BSLICE == ps_slice_hdr->i1_slice_type)) { while(r_idx < u4_num_rps_curr_temp_list0) { for(i = 0; (i < (WORD32)u4_num_st_curr_before) && (r_idx < u4_num_rps_curr_temp_list0); r_idx++, i++) { if(NULL == as_ref_pic_st_curr_before[i]) { as_ref_pic_st_curr_before[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_st_curr_before[i]); } as_ref_pic_list_temp0[r_idx] = as_ref_pic_st_curr_before[i]; } for(i = 0; (i < (WORD32)u4_num_st_curr_after) && (r_idx < u4_num_rps_curr_temp_list0); r_idx++, i++) { if(NULL == as_ref_pic_st_curr_after[i]) { as_ref_pic_st_curr_after[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_st_curr_after[i]); } as_ref_pic_list_temp0[r_idx] = as_ref_pic_st_curr_after[i]; } for(i = 0; (i < (WORD32)u4_num_lt_curr) && (r_idx < u4_num_rps_curr_temp_list0); r_idx++, i++) { if(NULL == as_ref_pic_lt_curr[i]) { as_ref_pic_lt_curr[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_lt_curr[i]); } as_ref_pic_list_temp0[r_idx] = as_ref_pic_lt_curr[i]; } } for(r_idx = 0; (WORD32)r_idx < ps_slice_hdr->i1_num_ref_idx_l0_active; r_idx++) { pic_buf_t *ps_pic_buf; ps_slice_hdr->as_ref_pic_list0[r_idx].pv_pic_buf = ps_slice_hdr->s_rplm.i1_ref_pic_list_modification_flag_l0 ? (void *)as_ref_pic_list_temp0[ps_slice_hdr->s_rplm.i1_list_entry_l0[r_idx]] : (void *)as_ref_pic_list_temp0[r_idx]; ps_pic_buf = (pic_buf_t *)ps_slice_hdr->as_ref_pic_list0[r_idx].pv_pic_buf; if(ps_pic_buf == NULL) return IHEVCD_REF_PIC_NOT_FOUND; ps_mv_buf = ihevcd_mv_mgr_get_poc(ps_mv_buf_mgr, ps_pic_buf->i4_abs_poc); ps_slice_hdr->as_ref_pic_list0[r_idx].pv_mv_buf = ps_mv_buf; } if(ps_slice_hdr->i1_slice_type == BSLICE) { u4_num_rps_curr_temp_list1 = (WORD32)u4_num_total_curr > ps_slice_hdr->i1_num_ref_idx_l1_active ? 
(WORD32)u4_num_total_curr : ps_slice_hdr->i1_num_ref_idx_l1_active; r_idx = 0; while(r_idx < u4_num_rps_curr_temp_list1) { for(i = 0; (i < (WORD32)u4_num_st_curr_after) && (r_idx < u4_num_rps_curr_temp_list1); r_idx++, i++) { if(NULL == as_ref_pic_st_curr_after[i]) { as_ref_pic_st_curr_after[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_st_curr_after[i]); } as_ref_pic_list_temp1[r_idx] = as_ref_pic_st_curr_after[i]; } for(i = 0; (i < (WORD32)u4_num_st_curr_before) && (r_idx < u4_num_rps_curr_temp_list1); r_idx++, i++) { if(NULL == as_ref_pic_st_curr_before[i]) { as_ref_pic_st_curr_before[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_st_curr_before[i]); } as_ref_pic_list_temp1[r_idx] = as_ref_pic_st_curr_before[i]; } for(i = 0; (i < (WORD32)u4_num_lt_curr) && (r_idx < u4_num_rps_curr_temp_list1); r_idx++, i++) { if(NULL == as_ref_pic_lt_curr[i]) { as_ref_pic_lt_curr[i] = ihevc_dpb_mgr_get_ref_by_nearest_poc(ps_dpb_mgr, ai4_poc_lt_curr[i]); } as_ref_pic_list_temp1[r_idx] = as_ref_pic_lt_curr[i]; } } for(r_idx = 0; (WORD32)r_idx < ps_slice_hdr->i1_num_ref_idx_l1_active; r_idx++) { pic_buf_t *ps_pic_buf; ps_slice_hdr->as_ref_pic_list1[r_idx].pv_pic_buf = ps_slice_hdr->s_rplm.i1_ref_pic_list_modification_flag_l1 ? (void *)as_ref_pic_list_temp1[ps_slice_hdr->s_rplm.i1_list_entry_l1[r_idx]] : (void *)as_ref_pic_list_temp1[r_idx]; ps_pic_buf = (pic_buf_t *)ps_slice_hdr->as_ref_pic_list1[r_idx].pv_pic_buf; if(ps_pic_buf == NULL) return IHEVCD_REF_PIC_NOT_FOUND; ps_mv_buf = ihevcd_mv_mgr_get_poc(ps_mv_buf_mgr, ps_pic_buf->i4_abs_poc); ps_slice_hdr->as_ref_pic_list1[r_idx].pv_mv_buf = ps_mv_buf; } } } DEBUG_PRINT_REF_LIST_POCS(i4_pic_order_cnt_val, ps_slice_hdr, ps_dpb_mgr, u4_num_st_curr_before, u4_num_st_curr_after, u4_num_st_foll, u4_num_lt_curr, u4_num_lt_foll, ai4_poc_st_curr_before, ai4_poc_st_curr_after, ai4_poc_st_foll, ai4_poc_lt_curr, ai4_poc_lt_foll); /* Buffers that are still marked as UNUSED_FOR_REF are released from dpb (internally dpb calls release from pic buf manager)*/ for(i = 0; i < MAX_DPB_BUFS; i++) { if((ps_dpb_mgr->as_dpb_info[i].ps_pic_buf) && (UNUSED_FOR_REF == ps_dpb_mgr->as_dpb_info[i].ps_pic_buf->u1_used_as_ref)) { pic_buf_t *ps_pic_buf = ps_dpb_mgr->as_dpb_info[i].ps_pic_buf; mv_buf_t *ps_mv_buf; /* Long term index is set to MAX_DPB_BUFS to ensure it is not added as LT */ ihevc_dpb_mgr_del_ref(ps_dpb_mgr, (buf_mgr_t *)ps_codec->pv_pic_buf_mgr, ps_pic_buf->i4_abs_poc); /* Find buffer id of the MV bank corresponding to the buffer being freed (Buffer with POC of u4_abs_poc) */ ps_mv_buf = (mv_buf_t *)ps_codec->ps_mv_buf; for(j = 0; j < ps_codec->i4_max_dpb_size; j++) { if(ps_mv_buf && ps_mv_buf->i4_abs_poc == ps_pic_buf->i4_abs_poc) { ihevc_buf_mgr_release((buf_mgr_t *)ps_codec->pv_mv_buf_mgr, j, BUF_MGR_REF); break; } ps_mv_buf++; } } } return IHEVCD_SUCCESS; }
173,998
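The ihevcd fix above corrects two things in the MV-buffer release loop: it stops reusing the outer loop counter i (switching to a dedicated index j) and it bounds the scan by the number of allocated buffers (i4_max_dpb_size) instead of the manager-wide maximum. A small sketch of the corrected shape, with placeholder names, is below.

/* Only scan entries that were actually allocated, and use a dedicated
 * index so the caller's loop counter is not clobbered. */
struct mv_buf { int abs_poc; int in_use; };

static void release_matching(struct mv_buf *bufs, int num_allocated,
                             int poc_to_release) {
    int j;                                  /* dedicated index, not 'i' */
    for (j = 0; j < num_allocated; j++) {   /* bounded by allocated count */
        if (bufs[j].in_use && bufs[j].abs_poc == poc_to_release) {
            bufs[j].in_use = 0;
            break;
        }
    }
}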
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void BufferQueueConsumer::dump(String8& result, const char* prefix) const { const IPCThreadState* ipc = IPCThreadState::self(); const pid_t pid = ipc->getCallingPid(); const uid_t uid = ipc->getCallingUid(); if ((uid != AID_SHELL) && !PermissionCache::checkPermission(String16( "android.permission.DUMP"), pid, uid)) { result.appendFormat("Permission Denial: can't dump BufferQueueConsumer " "from pid=%d, uid=%d\n", pid, uid); } else { mCore->dump(result, prefix); } } Commit Message: Add SN logging Bug 27046057 Change-Id: Iede7c92e59e60795df1ec7768ebafd6b090f1c27 CWE ID: CWE-264
void BufferQueueConsumer::dump(String8& result, const char* prefix) const { const IPCThreadState* ipc = IPCThreadState::self(); const pid_t pid = ipc->getCallingPid(); const uid_t uid = ipc->getCallingUid(); if ((uid != AID_SHELL) && !PermissionCache::checkPermission(String16( "android.permission.DUMP"), pid, uid)) { result.appendFormat("Permission Denial: can't dump BufferQueueConsumer " "from pid=%d, uid=%d\n", pid, uid); android_errorWriteWithInfoLog(0x534e4554, "27046057", uid, NULL, 0); } else { mCore->dump(result, prefix); } }
173,894
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp) { if (d_mountpoint(dentry)) return 1; if (nfsd4_is_junction(dentry)) return 1; if (!(exp->ex_flags & NFSEXP_V4ROOT)) return 0; return d_inode(dentry) != NULL; } Commit Message: Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux Pull nfsd updates from Bruce Fields: "Another RDMA update from Chuck Lever, and a bunch of miscellaneous bugfixes" * tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits) nfsd: Fix up the "supattr_exclcreat" attributes nfsd: encoders mustn't use unitialized values in error cases nfsd: fix undefined behavior in nfsd4_layout_verify lockd: fix lockd shutdown race NFSv4: Fix callback server shutdown SUNRPC: Refactor svc_set_num_threads() NFSv4.x/callback: Create the callback service through svc_create_pooled lockd: remove redundant check on block svcrdma: Clean out old XDR encoders svcrdma: Remove the req_map cache svcrdma: Remove unused RDMA Write completion handler svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt svcrdma: Clean up RPC-over-RDMA backchannel reply processing svcrdma: Report Write/Reply chunk overruns svcrdma: Clean up RDMA_ERROR path svcrdma: Use rdma_rw API in RPC reply path svcrdma: Introduce local rdma_rw API helpers svcrdma: Clean up svc_rdma_get_inv_rkey() svcrdma: Add helper to save pages under I/O svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT ... CWE ID: CWE-404
int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp) { if (!d_inode(dentry)) return 0; if (exp->ex_flags & NFSEXP_V4ROOT) return 1; if (nfsd4_is_junction(dentry)) return 1; if (d_mountpoint(dentry)) /* * Might only be a mountpoint in a different namespace, * but we need to check. */ return 2; return 0; }
168,154
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void DevToolsWindow::InspectedContentsClosing() { web_contents_->GetRenderViewHost()->ClosePage(); } Commit Message: DevTools: handle devtools renderer unresponsiveness during beforeunload event interception This patch fixes the crash which happenes under the following conditions: 1. DevTools window is in undocked state 2. DevTools renderer is unresponsive 3. User attempts to close inspected page BUG=322380 Review URL: https://codereview.chromium.org/84883002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@237611 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-264
void DevToolsWindow::InspectedContentsClosing() { intercepted_page_beforeunload_ = false; web_contents_->GetRenderViewHost()->ClosePage(); }
171,267
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void pcrypt_free(struct crypto_instance *inst) { struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); crypto_drop_aead(&ctx->spawn); kfree(inst); } Commit Message: crypto: pcrypt - fix freeing pcrypt instances pcrypt is using the old way of freeing instances, where the ->free() method specified in the 'struct crypto_template' is passed a pointer to the 'struct crypto_instance'. But the crypto_instance is being kfree()'d directly, which is incorrect because the memory was actually allocated as an aead_instance, which contains the crypto_instance at a nonzero offset. Thus, the wrong pointer was being kfree()'d. Fix it by switching to the new way to free aead_instance's where the ->free() method is specified in the aead_instance itself. Reported-by: syzbot <[email protected]> Fixes: 0496f56065e0 ("crypto: pcrypt - Add support for new AEAD interface") Cc: <[email protected]> # v4.2+ Signed-off-by: Eric Biggers <[email protected]> Signed-off-by: Herbert Xu <[email protected]> CWE ID: CWE-763
static void pcrypt_free(struct aead_instance *inst) { struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_aead(&ctx->spawn); kfree(inst); }
169,425
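The pcrypt fix addresses CWE-763 by making sure the pointer handed to the allocator is the one that was originally allocated: the crypto_instance lives at a nonzero offset inside the aead_instance, so freeing the inner pointer corrupts the heap. The sketch below reproduces that bug class with illustrative structures and the usual container_of idiom; it is not the kernel code.

/* Free the outer allocation, never an interior pointer. */
#include <stdlib.h>
#include <stddef.h>

struct inner { int placeholder; };

struct outer {
    long header;          /* pushes 'base' to a nonzero offset */
    struct inner base;
};

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void free_from_inner(struct inner *in) {
    /* WRONG: free(in) would hand the allocator an interior pointer. */
    struct outer *out = container_of(in, struct outer, base);
    free(out);            /* correct: free the pointer malloc returned */
}

int main(void) {
    struct outer *o = malloc(sizeof *o);
    if (!o) return 1;
    free_from_inner(&o->base);
    return 0;
}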
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: kg_unseal_v1_iov(krb5_context context, OM_uint32 *minor_status, krb5_gss_ctx_id_rec *ctx, gss_iov_buffer_desc *iov, int iov_count, size_t token_wrapper_len, int *conf_state, gss_qop_t *qop_state, int toktype) { OM_uint32 code; gss_iov_buffer_t header; gss_iov_buffer_t trailer; unsigned char *ptr; int sealalg; int signalg; krb5_checksum cksum; krb5_checksum md5cksum; size_t cksum_len = 0; size_t conflen = 0; int direction; krb5_ui_4 seqnum; OM_uint32 retval; size_t sumlen; krb5_keyusage sign_usage = KG_USAGE_SIGN; md5cksum.length = cksum.length = 0; md5cksum.contents = cksum.contents = NULL; header = kg_locate_header_iov(iov, iov_count, toktype); assert(header != NULL); trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER); if (trailer != NULL && trailer->buffer.length != 0) { *minor_status = (OM_uint32)KRB5_BAD_MSIZE; return GSS_S_DEFECTIVE_TOKEN; } if (header->buffer.length < token_wrapper_len + 14) { *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } ptr = (unsigned char *)header->buffer.value + token_wrapper_len; signalg = ptr[0]; signalg |= ptr[1] << 8; sealalg = ptr[2]; sealalg |= ptr[3] << 8; if (ptr[4] != 0xFF || ptr[5] != 0xFF) { *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } if (toktype != KG_TOK_WRAP_MSG && sealalg != 0xFFFF) { *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } if (toktype == KG_TOK_WRAP_MSG && !(sealalg == 0xFFFF || sealalg == ctx->sealalg)) { *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } if ((ctx->sealalg == SEAL_ALG_NONE && signalg > 1) || (ctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) || (ctx->sealalg == SEAL_ALG_DES3KD && signalg != SGN_ALG_HMAC_SHA1_DES3_KD)|| (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4 && signalg != SGN_ALG_HMAC_MD5)) { *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } switch (signalg) { case SGN_ALG_DES_MAC_MD5: case SGN_ALG_MD2_5: case SGN_ALG_HMAC_MD5: cksum_len = 8; if (toktype != KG_TOK_WRAP_MSG) sign_usage = 15; break; case SGN_ALG_3: cksum_len = 16; break; case SGN_ALG_HMAC_SHA1_DES3_KD: cksum_len = 20; break; default: *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } /* get the token parameters */ code = kg_get_seq_num(context, ctx->seq, ptr + 14, ptr + 6, &direction, &seqnum); if (code != 0) { *minor_status = code; return GSS_S_BAD_SIG; } /* decode the message, if SEAL */ if (toktype == KG_TOK_WRAP_MSG) { if (sealalg != 0xFFFF) { if (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) { unsigned char bigend_seqnum[4]; krb5_keyblock *enc_key; size_t i; store_32_be(seqnum, bigend_seqnum); code = krb5_k_key_keyblock(context, ctx->enc, &enc_key); if (code != 0) { retval = GSS_S_FAILURE; goto cleanup; } assert(enc_key->length == 16); for (i = 0; i < enc_key->length; i++) ((char *)enc_key->contents)[i] ^= 0xF0; code = kg_arcfour_docrypt_iov(context, enc_key, 0, &bigend_seqnum[0], 4, iov, iov_count); krb5_free_keyblock(context, enc_key); } else { code = kg_decrypt_iov(context, 0, ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0), 0 /*EC*/, 0 /*RRC*/, ctx->enc, KG_USAGE_SEAL, NULL, iov, iov_count); } if (code != 0) { retval = GSS_S_FAILURE; goto cleanup; } } conflen = kg_confounder_size(context, ctx->enc->keyblock.enctype); } if (header->buffer.length != token_wrapper_len + 14 + cksum_len + conflen) { retval = GSS_S_DEFECTIVE_TOKEN; goto cleanup; } /* compute the checksum of the message */ /* initialize the checksum */ switch (signalg) { case SGN_ALG_DES_MAC_MD5: case SGN_ALG_MD2_5: case SGN_ALG_DES_MAC: case SGN_ALG_3: md5cksum.checksum_type = CKSUMTYPE_RSA_MD5; break; case SGN_ALG_HMAC_MD5: md5cksum.checksum_type = 
CKSUMTYPE_HMAC_MD5_ARCFOUR; break; case SGN_ALG_HMAC_SHA1_DES3_KD: md5cksum.checksum_type = CKSUMTYPE_HMAC_SHA1_DES3; break; default: abort(); } code = krb5_c_checksum_length(context, md5cksum.checksum_type, &sumlen); if (code != 0) { retval = GSS_S_FAILURE; goto cleanup; } md5cksum.length = sumlen; /* compute the checksum of the message */ code = kg_make_checksum_iov_v1(context, md5cksum.checksum_type, cksum_len, ctx->seq, ctx->enc, sign_usage, iov, iov_count, toktype, &md5cksum); if (code != 0) { retval = GSS_S_FAILURE; goto cleanup; } switch (signalg) { case SGN_ALG_DES_MAC_MD5: case SGN_ALG_3: code = kg_encrypt_inplace(context, ctx->seq, KG_USAGE_SEAL, (g_OID_equal(ctx->mech_used, gss_mech_krb5_old) ? ctx->seq->keyblock.contents : NULL), md5cksum.contents, 16); if (code != 0) { retval = GSS_S_FAILURE; goto cleanup; } cksum.length = cksum_len; cksum.contents = md5cksum.contents + 16 - cksum.length; code = k5_bcmp(cksum.contents, ptr + 14, cksum.length); break; case SGN_ALG_HMAC_SHA1_DES3_KD: case SGN_ALG_HMAC_MD5: code = k5_bcmp(md5cksum.contents, ptr + 14, cksum_len); break; default: code = 0; retval = GSS_S_DEFECTIVE_TOKEN; goto cleanup; break; } if (code != 0) { code = 0; retval = GSS_S_BAD_SIG; goto cleanup; } /* * For GSS_C_DCE_STYLE, the caller manages the padding, because the * pad length is in the RPC PDU. The value of the padding may be * uninitialized. For normal GSS, the last bytes of the decrypted * data contain the pad length. kg_fixup_padding_iov() will find * this and fixup the last data IOV appropriately. */ if (toktype == KG_TOK_WRAP_MSG && (ctx->gss_flags & GSS_C_DCE_STYLE) == 0) { retval = kg_fixup_padding_iov(&code, iov, iov_count); if (retval != GSS_S_COMPLETE) goto cleanup; } if (conf_state != NULL) *conf_state = (sealalg != 0xFFFF); if (qop_state != NULL) *qop_state = GSS_C_QOP_DEFAULT; if ((ctx->initiate && direction != 0xff) || (!ctx->initiate && direction != 0)) { *minor_status = (OM_uint32)G_BAD_DIRECTION; retval = GSS_S_BAD_SIG; } code = 0; retval = g_order_check(&ctx->seqstate, (gssint_uint64)seqnum); cleanup: krb5_free_checksum_contents(context, &md5cksum); *minor_status = code; return retval; } Commit Message: Handle invalid RFC 1964 tokens [CVE-2014-4341...] Detect the following cases which would otherwise cause invalid memory accesses and/or integer underflow: * An RFC 1964 token being processed by an RFC 4121-only context [CVE-2014-4342] * A header with fewer than 22 bytes after the token ID or an incomplete checksum [CVE-2014-4341 CVE-2014-4342] * A ciphertext shorter than the confounder [CVE-2014-4341] * A declared padding length longer than the plaintext [CVE-2014-4341] If we detect a bad pad byte, continue on to compute the checksum to avoid creating a padding oracle, but treat the checksum as invalid even if it compares equal. CVE-2014-4341: In MIT krb5, an unauthenticated remote attacker with the ability to inject packets into a legitimately established GSSAPI application session can cause a program crash due to invalid memory references when attempting to read beyond the end of a buffer. CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:P/E:POC/RL:OF/RC:C CVE-2014-4342: In MIT krb5 releases krb5-1.7 and later, an unauthenticated remote attacker with the ability to inject packets into a legitimately established GSSAPI application session can cause a program crash due to invalid memory references when reading beyond the end of a buffer or by causing a null pointer dereference. 
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:P/E:POC/RL:OF/RC:C [[email protected]: CVE summaries, CVSS] (cherry picked from commit fb99962cbd063ac04c9a9d2cc7c75eab73f3533d) ticket: 7949 version_fixed: 1.12.2 status: resolved CWE ID: CWE-119
kg_unseal_v1_iov(krb5_context context, OM_uint32 *minor_status, krb5_gss_ctx_id_rec *ctx, gss_iov_buffer_desc *iov, int iov_count, size_t token_wrapper_len, int *conf_state, gss_qop_t *qop_state, int toktype) { OM_uint32 code; gss_iov_buffer_t header; gss_iov_buffer_t trailer; unsigned char *ptr; int sealalg; int signalg; krb5_checksum cksum; krb5_checksum md5cksum; size_t cksum_len = 0; size_t conflen = 0; int direction; krb5_ui_4 seqnum; OM_uint32 retval; size_t sumlen; krb5_keyusage sign_usage = KG_USAGE_SIGN; md5cksum.length = cksum.length = 0; md5cksum.contents = cksum.contents = NULL; header = kg_locate_header_iov(iov, iov_count, toktype); assert(header != NULL); trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER); if (trailer != NULL && trailer->buffer.length != 0) { *minor_status = (OM_uint32)KRB5_BAD_MSIZE; return GSS_S_DEFECTIVE_TOKEN; } if (ctx->seq == NULL) { /* ctx was established using a newer enctype, and cannot process RFC * 1964 tokens. */ *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } if (header->buffer.length < token_wrapper_len + 22) { *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } ptr = (unsigned char *)header->buffer.value + token_wrapper_len; signalg = ptr[0]; signalg |= ptr[1] << 8; sealalg = ptr[2]; sealalg |= ptr[3] << 8; if (ptr[4] != 0xFF || ptr[5] != 0xFF) { *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } if (toktype != KG_TOK_WRAP_MSG && sealalg != 0xFFFF) { *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } if (toktype == KG_TOK_WRAP_MSG && !(sealalg == 0xFFFF || sealalg == ctx->sealalg)) { *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } if ((ctx->sealalg == SEAL_ALG_NONE && signalg > 1) || (ctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) || (ctx->sealalg == SEAL_ALG_DES3KD && signalg != SGN_ALG_HMAC_SHA1_DES3_KD)|| (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4 && signalg != SGN_ALG_HMAC_MD5)) { *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } switch (signalg) { case SGN_ALG_DES_MAC_MD5: case SGN_ALG_MD2_5: case SGN_ALG_HMAC_MD5: cksum_len = 8; if (toktype != KG_TOK_WRAP_MSG) sign_usage = 15; break; case SGN_ALG_3: cksum_len = 16; break; case SGN_ALG_HMAC_SHA1_DES3_KD: cksum_len = 20; break; default: *minor_status = 0; return GSS_S_DEFECTIVE_TOKEN; } /* get the token parameters */ code = kg_get_seq_num(context, ctx->seq, ptr + 14, ptr + 6, &direction, &seqnum); if (code != 0) { *minor_status = code; return GSS_S_BAD_SIG; } /* decode the message, if SEAL */ if (toktype == KG_TOK_WRAP_MSG) { if (sealalg != 0xFFFF) { if (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) { unsigned char bigend_seqnum[4]; krb5_keyblock *enc_key; size_t i; store_32_be(seqnum, bigend_seqnum); code = krb5_k_key_keyblock(context, ctx->enc, &enc_key); if (code != 0) { retval = GSS_S_FAILURE; goto cleanup; } assert(enc_key->length == 16); for (i = 0; i < enc_key->length; i++) ((char *)enc_key->contents)[i] ^= 0xF0; code = kg_arcfour_docrypt_iov(context, enc_key, 0, &bigend_seqnum[0], 4, iov, iov_count); krb5_free_keyblock(context, enc_key); } else { code = kg_decrypt_iov(context, 0, ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0), 0 /*EC*/, 0 /*RRC*/, ctx->enc, KG_USAGE_SEAL, NULL, iov, iov_count); } if (code != 0) { retval = GSS_S_FAILURE; goto cleanup; } } conflen = kg_confounder_size(context, ctx->enc->keyblock.enctype); } if (header->buffer.length != token_wrapper_len + 14 + cksum_len + conflen) { retval = GSS_S_DEFECTIVE_TOKEN; goto cleanup; } /* compute the checksum of the message */ /* initialize the checksum */ switch (signalg) { case SGN_ALG_DES_MAC_MD5: case 
SGN_ALG_MD2_5: case SGN_ALG_DES_MAC: case SGN_ALG_3: md5cksum.checksum_type = CKSUMTYPE_RSA_MD5; break; case SGN_ALG_HMAC_MD5: md5cksum.checksum_type = CKSUMTYPE_HMAC_MD5_ARCFOUR; break; case SGN_ALG_HMAC_SHA1_DES3_KD: md5cksum.checksum_type = CKSUMTYPE_HMAC_SHA1_DES3; break; default: abort(); } code = krb5_c_checksum_length(context, md5cksum.checksum_type, &sumlen); if (code != 0) { retval = GSS_S_FAILURE; goto cleanup; } md5cksum.length = sumlen; /* compute the checksum of the message */ code = kg_make_checksum_iov_v1(context, md5cksum.checksum_type, cksum_len, ctx->seq, ctx->enc, sign_usage, iov, iov_count, toktype, &md5cksum); if (code != 0) { retval = GSS_S_FAILURE; goto cleanup; } switch (signalg) { case SGN_ALG_DES_MAC_MD5: case SGN_ALG_3: code = kg_encrypt_inplace(context, ctx->seq, KG_USAGE_SEAL, (g_OID_equal(ctx->mech_used, gss_mech_krb5_old) ? ctx->seq->keyblock.contents : NULL), md5cksum.contents, 16); if (code != 0) { retval = GSS_S_FAILURE; goto cleanup; } cksum.length = cksum_len; cksum.contents = md5cksum.contents + 16 - cksum.length; code = k5_bcmp(cksum.contents, ptr + 14, cksum.length); break; case SGN_ALG_HMAC_SHA1_DES3_KD: case SGN_ALG_HMAC_MD5: code = k5_bcmp(md5cksum.contents, ptr + 14, cksum_len); break; default: code = 0; retval = GSS_S_DEFECTIVE_TOKEN; goto cleanup; break; } if (code != 0) { code = 0; retval = GSS_S_BAD_SIG; goto cleanup; } /* * For GSS_C_DCE_STYLE, the caller manages the padding, because the * pad length is in the RPC PDU. The value of the padding may be * uninitialized. For normal GSS, the last bytes of the decrypted * data contain the pad length. kg_fixup_padding_iov() will find * this and fixup the last data IOV appropriately. */ if (toktype == KG_TOK_WRAP_MSG && (ctx->gss_flags & GSS_C_DCE_STYLE) == 0) { retval = kg_fixup_padding_iov(&code, iov, iov_count); if (retval != GSS_S_COMPLETE) goto cleanup; } if (conf_state != NULL) *conf_state = (sealalg != 0xFFFF); if (qop_state != NULL) *qop_state = GSS_C_QOP_DEFAULT; if ((ctx->initiate && direction != 0xff) || (!ctx->initiate && direction != 0)) { *minor_status = (OM_uint32)G_BAD_DIRECTION; retval = GSS_S_BAD_SIG; } code = 0; retval = g_order_check(&ctx->seqstate, (gssint_uint64)seqnum); cleanup: krb5_free_checksum_contents(context, &md5cksum); *minor_status = code; return retval; }
166,313
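The krb5 fix above rejects tokens whose header cannot hold the fixed fields (and whose context has no RFC 1964 sequence key) before any fixed offset is dereferenced. The sketch below shows the same validate-length-before-parse pattern; the wrapper length, field offsets, and names are simplified assumptions, not the actual RFC 1964 layout.

/* Verify the buffer can hold every fixed field before reading it. */
#include <stdint.h>
#include <stddef.h>

#define WRAPPER_LEN 8      /* placeholder for the mech-specific prefix */
#define FIXED_FIELDS 22    /* bytes required after the wrapper */

static int parse_token(const uint8_t *tok, size_t tok_len,
                       uint16_t *signalg, uint16_t *sealalg) {
    if (tok_len < WRAPPER_LEN + FIXED_FIELDS)
        return -1;                       /* defective token, do not parse */
    const uint8_t *p = tok + WRAPPER_LEN;
    *signalg = (uint16_t)(p[0] | (p[1] << 8));
    *sealalg = (uint16_t)(p[2] | (p[3] << 8));
    if (p[4] != 0xFF || p[5] != 0xFF)
        return -1;                       /* filler bytes must be 0xFFFF */
    return 0;
}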