Dataset columns:

  Column              Type     Values / length range
  instruction         string   1 unique value (stringclasses)
  input               string   90 to 139k characters
  output              string   16 to 138k characters
  __index_level_0__   int64    165k to 175k
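The rows below follow this schema directly. As a minimal sketch (assuming the dump corresponds to a Hugging Face dataset with these four columns), it could be loaded and inspected with the `datasets` library; the repository path used here is a hypothetical placeholder, not the real dataset name.

```python
# Minimal sketch: load and inspect a dataset with the columns shown above.
# "user/cwe-fix-pairs" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("user/cwe-fix-pairs", split="train")

for row in ds.select(range(3)):
    print(row["__index_level_0__"])   # row index, e.g. 172427
    print(row["instruction"][:80])    # shared prompt (single unique value)
    print(row["input"][:120])         # vulnerable code + commit message + CWE ID
    print(row["output"][:120])        # fixed function only
```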
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void LoadURL() { WebContents* contents = shell()->web_contents(); WebPreferences prefs = contents->GetRenderViewHost()->GetWebkitPreferences(); prefs.mock_scrollbars_enabled = true; contents->GetRenderViewHost()->UpdateWebkitPreferences(prefs); const GURL data_url(kDataURL); NavigateToURL(shell(), data_url); RenderWidgetHostImpl* host = GetWidgetHost(); HitTestRegionObserver observer(host->GetFrameSinkId()); observer.WaitForHitTestData(); } Commit Message: Revert "Add explicit flag for compositor scrollbar injected gestures" This reverts commit d9a56afcbdf9850bc39bb3edb56d07d11a1eb2b2. Reason for revert: Findit (https://goo.gl/kROfz5) identified CL at revision 669086 as the culprit for flakes in the build cycles as shown on: https://analysis.chromium.org/p/chromium/flake-portal/analysis/culprit?key=ag9zfmZpbmRpdC1mb3ItbWVyQwsSDEZsYWtlQ3VscHJpdCIxY2hyb21pdW0vZDlhNTZhZmNiZGY5ODUwYmMzOWJiM2VkYjU2ZDA3ZDExYTFlYjJiMgw Sample Failed Build: https://ci.chromium.org/buildbot/chromium.chromiumos/linux-chromeos-rel/25818 Sample Failed Step: content_browsertests on Ubuntu-16.04 Sample Flaky Test: ScrollLatencyScrollbarBrowserTest.ScrollbarThumbDragLatency Original change's description: > Add explicit flag for compositor scrollbar injected gestures > > The original change to enable scrollbar latency for the composited > scrollbars incorrectly used an existing member to try and determine > whether a GestureScrollUpdate was the first one in an injected sequence > or not. is_first_gesture_scroll_update_ was incorrect because it is only > updated when input is actually dispatched to InputHandlerProxy, and the > flag is cleared for all GSUs before the location where it was being > read. > > This bug was missed because of incorrect tests. The > VerifyRecordedSamplesForHistogram method doesn't actually assert or > expect anything - the return value must be inspected. > > As part of fixing up the tests, I made a few other changes to get them > passing consistently across all platforms: > - turn on main thread scrollbar injection feature (in case it's ever > turned off we don't want the tests to start failing) > - enable mock scrollbars > - disable smooth scrolling > - don't run scrollbar tests on Android > > The composited scrollbar button test is disabled due to a bug in how > the mock theme reports its button sizes, which throws off the region > detection in ScrollbarLayerImplBase::IdentifyScrollbarPart (filed > crbug.com/974063 for this issue). > > Change-Id: Ie1a762a5f6ecc264d22f0256db68f141fc76b950 > > Bug: 954007 > Change-Id: Ib258e08e083e79da90ba2e4e4216e4879cf00cf7 > Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1652741 > Commit-Queue: Daniel Libby <[email protected]> > Reviewed-by: David Bokan <[email protected]> > Cr-Commit-Position: refs/heads/master@{#669086} Change-Id: Icc743e48fa740fe27f0cb0cfa21b209a696f518c No-Presubmit: true No-Tree-Checks: true No-Try: true Bug: 954007 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1660114 Cr-Commit-Position: refs/heads/master@{#669150} CWE ID: CWE-281
void LoadURL() { const GURL data_url(kDataURL); NavigateToURL(shell(), data_url); RenderWidgetHostImpl* host = GetWidgetHost(); host->GetView()->SetSize(gfx::Size(400, 400)); HitTestRegionObserver observer(host->GetFrameSinkId()); observer.WaitForHitTestData(); }
172,427
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: off_t HFSForkReadStream::Seek(off_t offset, int whence) { DCHECK_EQ(SEEK_SET, whence); DCHECK_GE(offset, 0); DCHECK_LT(static_cast<uint64_t>(offset), fork_.logicalSize); size_t target_block = offset / hfs_->block_size(); size_t block_count = 0; for (size_t i = 0; i < arraysize(fork_.extents); ++i) { const HFSPlusExtentDescriptor* extent = &fork_.extents[i]; if (extent->startBlock == 0 && extent->blockCount == 0) break; base::CheckedNumeric<size_t> new_block_count(block_count); new_block_count += extent->blockCount; if (!new_block_count.IsValid()) { DLOG(ERROR) << "Seek offset block count overflows"; return false; } if (target_block < new_block_count.ValueOrDie()) { if (current_extent_ != i) { read_current_extent_ = false; current_extent_ = i; } auto iterator_block_offset = base::CheckedNumeric<size_t>(block_count) * hfs_->block_size(); if (!iterator_block_offset.IsValid()) { DLOG(ERROR) << "Seek block offset overflows"; return false; } fork_logical_offset_ = offset; return offset; } block_count = new_block_count.ValueOrDie(); } return -1; } Commit Message: Add the SandboxedDMGParser and wire it up to the DownloadProtectionService. BUG=496898,464083 [email protected], [email protected], [email protected], [email protected] Review URL: https://codereview.chromium.org/1299223006 . Cr-Commit-Position: refs/heads/master@{#344876} CWE ID:
off_t HFSForkReadStream::Seek(off_t offset, int whence) { DCHECK_EQ(SEEK_SET, whence); DCHECK_GE(offset, 0); DCHECK(offset == 0 || static_cast<uint64_t>(offset) < fork_.logicalSize); size_t target_block = offset / hfs_->block_size(); size_t block_count = 0; for (size_t i = 0; i < arraysize(fork_.extents); ++i) { const HFSPlusExtentDescriptor* extent = &fork_.extents[i]; if (extent->startBlock == 0 && extent->blockCount == 0) break; base::CheckedNumeric<size_t> new_block_count(block_count); new_block_count += extent->blockCount; if (!new_block_count.IsValid()) { DLOG(ERROR) << "Seek offset block count overflows"; return false; } if (target_block < new_block_count.ValueOrDie()) { if (current_extent_ != i) { read_current_extent_ = false; current_extent_ = i; } auto iterator_block_offset = base::CheckedNumeric<size_t>(block_count) * hfs_->block_size(); if (!iterator_block_offset.IsValid()) { DLOG(ERROR) << "Seek block offset overflows"; return false; } fork_logical_offset_ = offset; return offset; } block_count = new_block_count.ValueOrDie(); } return -1; }
171,717
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: Utterance::Utterance(Profile* profile, const std::string& text, DictionaryValue* options, Task* completion_task) : profile_(profile), id_(next_utterance_id_++), text_(text), rate_(-1.0), pitch_(-1.0), volume_(-1.0), can_enqueue_(false), completion_task_(completion_task) { if (!options) { options_.reset(new DictionaryValue()); return; } options_.reset(options->DeepCopy()); if (options->HasKey(util::kVoiceNameKey)) options->GetString(util::kVoiceNameKey, &voice_name_); if (options->HasKey(util::kLocaleKey)) options->GetString(util::kLocaleKey, &locale_); if (options->HasKey(util::kGenderKey)) options->GetString(util::kGenderKey, &gender_); if (options->GetDouble(util::kRateKey, &rate_)) { if (!base::IsFinite(rate_) || rate_ < 0.0 || rate_ > 1.0) rate_ = -1.0; } if (options->GetDouble(util::kPitchKey, &pitch_)) { if (!base::IsFinite(pitch_) || pitch_ < 0.0 || pitch_ > 1.0) pitch_ = -1.0; } if (options->GetDouble(util::kVolumeKey, &volume_)) { if (!base::IsFinite(volume_) || volume_ < 0.0 || volume_ > 1.0) volume_ = -1.0; } if (options->HasKey(util::kEnqueueKey)) options->GetBoolean(util::kEnqueueKey, &can_enqueue_); } Commit Message: Extend TTS extension API to support richer events returned from the engine to the client. Previously we just had a completed event; this adds start, word boundary, sentence boundary, and marker boundary. In addition, interrupted and canceled, which were previously errors, now become events. Mac and Windows implementations extended to support as many of these events as possible. BUG=67713 BUG=70198 BUG=75106 BUG=83404 TEST=Updates all TTS API tests to be event-based, and adds new tests. Review URL: http://codereview.chromium.org/6792014 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@91665 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-20
Utterance::Utterance(Profile* profile, bool ExtensionTtsSpeakFunction::RunImpl() { std::string text; EXTENSION_FUNCTION_VALIDATE(args_->GetString(0, &text)); if (text.size() > 32768) { error_ = constants::kErrorUtteranceTooLong; return false; } scoped_ptr<DictionaryValue> options; if (args_->GetSize() >= 2) { DictionaryValue* temp_options = NULL; EXTENSION_FUNCTION_VALIDATE(args_->GetDictionary(1, &temp_options)); options.reset(temp_options->DeepCopy()); } std::string voice_name; if (options->HasKey(constants::kVoiceNameKey)) { EXTENSION_FUNCTION_VALIDATE( options->GetString(constants::kVoiceNameKey, &voice_name)); } std::string lang; if (options->HasKey(constants::kLangKey)) EXTENSION_FUNCTION_VALIDATE(options->GetString(constants::kLangKey, &lang)); if (!lang.empty() && !l10n_util::IsValidLocaleSyntax(lang)) { error_ = constants::kErrorInvalidLang; return false; }
170,392
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool ContainOnlyOneKeyboardLayout( const ImeConfigValue& value) { return (value.type == ImeConfigValue::kValueTypeStringList && value.string_list_value.size() == 1 && chromeos::input_method::IsKeyboardLayout( value.string_list_value[0])); } Commit Message: Remove use of libcros from InputMethodLibrary. BUG=chromium-os:16238 TEST==confirm that input methods work as before on the netbook. Also confirm that the chrome builds and works on the desktop as before. Review URL: http://codereview.chromium.org/7003086 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
bool ContainOnlyOneKeyboardLayout( const input_method::ImeConfigValue& value) { return (value.type == input_method::ImeConfigValue::kValueTypeStringList && value.string_list_value.size() == 1 && input_method::IsKeyboardLayout( value.string_list_value[0])); }
170,483
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: mwifiex_set_wmm_params(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_ap_settings *params) { const u8 *vendor_ie; const u8 *wmm_ie; u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02}; vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, params->beacon.tail, params->beacon.tail_len); if (vendor_ie) { wmm_ie = vendor_ie; memcpy(&bss_cfg->wmm_info, wmm_ie + sizeof(struct ieee_types_header), *(wmm_ie + 1)); priv->wmm_enabled = 1; } else { memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info)); memcpy(&bss_cfg->wmm_info.oui, wmm_oui, sizeof(wmm_oui)); bss_cfg->wmm_info.subtype = MWIFIEX_WMM_SUBTYPE; bss_cfg->wmm_info.version = MWIFIEX_WMM_VERSION; priv->wmm_enabled = 0; } bss_cfg->qos_info = 0x00; return; } Commit Message: mwifiex: Fix three heap overflow at parsing element in cfg80211_ap_settings mwifiex_update_vs_ie(),mwifiex_set_uap_rates() and mwifiex_set_wmm_params() call memcpy() without checking the destination size.Since the source is given from user-space, this may trigger a heap buffer overflow. Fix them by putting the length check before performing memcpy(). This fix addresses CVE-2019-14814,CVE-2019-14815,CVE-2019-14816. Signed-off-by: Wen Huang <[email protected]> Acked-by: Ganapathi Bhat <[email protected]> Signed-off-by: Kalle Valo <[email protected]> CWE ID: CWE-120
mwifiex_set_wmm_params(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_ap_settings *params) { const u8 *vendor_ie; const u8 *wmm_ie; u8 wmm_oui[] = {0x00, 0x50, 0xf2, 0x02}; vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, params->beacon.tail, params->beacon.tail_len); if (vendor_ie) { wmm_ie = vendor_ie; if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info)) return; memcpy(&bss_cfg->wmm_info, wmm_ie + sizeof(struct ieee_types_header), *(wmm_ie + 1)); priv->wmm_enabled = 1; } else { memset(&bss_cfg->wmm_info, 0, sizeof(bss_cfg->wmm_info)); memcpy(&bss_cfg->wmm_info.oui, wmm_oui, sizeof(wmm_oui)); bss_cfg->wmm_info.subtype = MWIFIEX_WMM_SUBTYPE; bss_cfg->wmm_info.version = MWIFIEX_WMM_VERSION; priv->wmm_enabled = 0; } bss_cfg->qos_info = 0x00; return; }
169,577
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: GURL GetFileManagerBaseUrl() { return GetFileManagerUrl("/"); } Commit Message: Reland r286968: The CL borrows ShareDialog from Files.app and add it to Gallery. Previous Review URL: https://codereview.chromium.org/431293002 BUG=374667 TEST=manually [email protected], [email protected] Review URL: https://codereview.chromium.org/433733004 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@286975 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
GURL GetFileManagerBaseUrl() {
171,195
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ID3::Iterator::getstring(String8 *id, bool otherdata) const { id->setTo(""); const uint8_t *frameData = mFrameData; if (frameData == NULL) { return; } uint8_t encoding = *frameData; if (mParent.mVersion == ID3_V1 || mParent.mVersion == ID3_V1_1) { if (mOffset == 126 || mOffset == 127) { char tmp[16]; sprintf(tmp, "%d", (int)*frameData); id->setTo(tmp); return; } id->setTo((const char*)frameData, mFrameSize); return; } if (mFrameSize < getHeaderLength() + 1) { return; } size_t n = mFrameSize - getHeaderLength() - 1; if (otherdata) { frameData += 4; int32_t i = n - 4; while(--i >= 0 && *++frameData != 0) ; int skipped = (frameData - mFrameData); if (skipped >= (int)n) { return; } n -= skipped; } if (encoding == 0x00) { id->setTo((const char*)frameData + 1, n); } else if (encoding == 0x03) { id->setTo((const char *)(frameData + 1), n); } else if (encoding == 0x02) { int len = n / 2; const char16_t *framedata = (const char16_t *) (frameData + 1); char16_t *framedatacopy = NULL; #if BYTE_ORDER == LITTLE_ENDIAN framedatacopy = new char16_t[len]; for (int i = 0; i < len; i++) { framedatacopy[i] = bswap_16(framedata[i]); } framedata = framedatacopy; #endif id->setTo(framedata, len); if (framedatacopy != NULL) { delete[] framedatacopy; } } else if (encoding == 0x01) { int len = n / 2; const char16_t *framedata = (const char16_t *) (frameData + 1); char16_t *framedatacopy = NULL; if (*framedata == 0xfffe) { framedatacopy = new char16_t[len]; for (int i = 0; i < len; i++) { framedatacopy[i] = bswap_16(framedata[i]); } framedata = framedatacopy; } if (*framedata == 0xfeff) { framedata++; len--; } bool eightBit = true; for (int i = 0; i < len; i++) { if (framedata[i] > 0xff) { eightBit = false; break; } } if (eightBit) { char *frame8 = new char[len]; for (int i = 0; i < len; i++) { frame8[i] = framedata[i]; } id->setTo(frame8, len); delete [] frame8; } else { id->setTo(framedata, len); } if (framedatacopy != NULL) { delete[] framedatacopy; } } } Commit Message: better validation lengths of strings in ID3 tags Validate lengths on strings in ID3 tags, particularly around 0. Also added code to handle cases when we can't get memory for copies of strings we want to extract from these tags. Affects L/M/N/master, same patch for all of them. Bug: 30744884 Change-Id: I2675a817a39f0927ec1f7e9f9c09f2e61020311e Test: play mp3 file which caused a <0 length. (cherry picked from commit d23c01546c4f82840a01a380def76ab6cae5d43f) CWE ID: CWE-20
void ID3::Iterator::getstring(String8 *id, bool otherdata) const { id->setTo(""); const uint8_t *frameData = mFrameData; if (frameData == NULL) { return; } uint8_t encoding = *frameData; if (mParent.mVersion == ID3_V1 || mParent.mVersion == ID3_V1_1) { if (mOffset == 126 || mOffset == 127) { char tmp[16]; sprintf(tmp, "%d", (int)*frameData); id->setTo(tmp); return; } id->setTo((const char*)frameData, mFrameSize); return; } if (mFrameSize < getHeaderLength() + 1) { return; } size_t n = mFrameSize - getHeaderLength() - 1; if (otherdata) { frameData += 4; int32_t i = n - 4; while(--i >= 0 && *++frameData != 0) ; int skipped = (frameData - mFrameData); if (skipped >= (int)n) { return; } n -= skipped; } if (n <= 0) { return; } if (encoding == 0x00) { id->setTo((const char*)frameData + 1, n); } else if (encoding == 0x03) { id->setTo((const char *)(frameData + 1), n); } else if (encoding == 0x02) { int len = n / 2; const char16_t *framedata = (const char16_t *) (frameData + 1); char16_t *framedatacopy = NULL; #if BYTE_ORDER == LITTLE_ENDIAN if (len > 0) { framedatacopy = new (std::nothrow) char16_t[len]; if (framedatacopy == NULL) { return; } for (int i = 0; i < len; i++) { framedatacopy[i] = bswap_16(framedata[i]); } framedata = framedatacopy; } #endif id->setTo(framedata, len); if (framedatacopy != NULL) { delete[] framedatacopy; } } else if (encoding == 0x01) { int len = n / 2; const char16_t *framedata = (const char16_t *) (frameData + 1); char16_t *framedatacopy = NULL; if (*framedata == 0xfffe) { // endianness marker != host endianness, convert & skip if (len <= 1) { return; // nothing after the marker } framedatacopy = new (std::nothrow) char16_t[len]; if (framedatacopy == NULL) { return; } for (int i = 0; i < len; i++) { framedatacopy[i] = bswap_16(framedata[i]); } framedata = framedatacopy; // and skip over the marker framedata++; len--; } else if (*framedata == 0xfeff) { // endianness marker == host endianness, skip it if (len <= 1) { return; // nothing after the marker } framedata++; len--; } bool eightBit = true; for (int i = 0; i < len; i++) { if (framedata[i] > 0xff) { eightBit = false; break; } } if (eightBit) { char *frame8 = new (std::nothrow) char[len]; if (frame8 != NULL) { for (int i = 0; i < len; i++) { frame8[i] = framedata[i]; } id->setTo(frame8, len); delete [] frame8; } else { id->setTo(framedata, len); } } else { id->setTo(framedata, len); } if (framedatacopy != NULL) { delete[] framedatacopy; } } }
173,393
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool CheckClientDownloadRequest::ShouldUploadForMalwareScan( DownloadCheckResultReason reason) { if (!base::FeatureList::IsEnabled(kDeepScanningOfDownloads)) return false; if (reason != DownloadCheckResultReason::REASON_DOWNLOAD_SAFE && reason != DownloadCheckResultReason::REASON_DOWNLOAD_UNCOMMON && reason != DownloadCheckResultReason::REASON_VERDICT_UNKNOWN) return false; content::BrowserContext* browser_context = content::DownloadItemUtils::GetBrowserContext(item_); if (!browser_context) return false; Profile* profile = Profile::FromBrowserContext(browser_context); if (!profile) return false; int send_files_for_malware_check = profile->GetPrefs()->GetInteger( prefs::kSafeBrowsingSendFilesForMalwareCheck); if (send_files_for_malware_check != SendFilesForMalwareCheckValues::SEND_DOWNLOADS && send_files_for_malware_check != SendFilesForMalwareCheckValues::SEND_UPLOADS_AND_DOWNLOADS) return false; return !policy::BrowserDMTokenStorage::Get()->RetrieveDMToken().empty(); } Commit Message: Migrate download_protection code to new DM token class. Migrates RetrieveDMToken calls to use the new BrowserDMToken class. Bug: 1020296 Change-Id: Icef580e243430d73b6c1c42b273a8540277481d9 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1904234 Commit-Queue: Dominique Fauteux-Chapleau <[email protected]> Reviewed-by: Tien Mai <[email protected]> Reviewed-by: Daniel Rubery <[email protected]> Cr-Commit-Position: refs/heads/master@{#714196} CWE ID: CWE-20
bool CheckClientDownloadRequest::ShouldUploadForMalwareScan( DownloadCheckResultReason reason) { if (!base::FeatureList::IsEnabled(kDeepScanningOfDownloads)) return false; if (reason != DownloadCheckResultReason::REASON_DOWNLOAD_SAFE && reason != DownloadCheckResultReason::REASON_DOWNLOAD_UNCOMMON && reason != DownloadCheckResultReason::REASON_VERDICT_UNKNOWN) return false; content::BrowserContext* browser_context = content::DownloadItemUtils::GetBrowserContext(item_); if (!browser_context) return false; Profile* profile = Profile::FromBrowserContext(browser_context); if (!profile) return false; int send_files_for_malware_check = profile->GetPrefs()->GetInteger( prefs::kSafeBrowsingSendFilesForMalwareCheck); if (send_files_for_malware_check != SendFilesForMalwareCheckValues::SEND_DOWNLOADS && send_files_for_malware_check != SendFilesForMalwareCheckValues::SEND_UPLOADS_AND_DOWNLOADS) return false; // If there's no valid DM token, the upload will fail, so we can skip // uploading now. return BrowserDMTokenStorage::Get()->RetrieveBrowserDMToken().is_valid(); }
172,357
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void IOThread::RegisterPrefs(PrefRegistrySimple* registry) { registry->RegisterStringPref(prefs::kAuthSchemes, "basic,digest,ntlm,negotiate," "spdyproxy"); registry->RegisterBooleanPref(prefs::kDisableAuthNegotiateCnameLookup, false); registry->RegisterBooleanPref(prefs::kEnableAuthNegotiatePort, false); registry->RegisterStringPref(prefs::kAuthServerWhitelist, std::string()); registry->RegisterStringPref(prefs::kAuthNegotiateDelegateWhitelist, std::string()); registry->RegisterStringPref(prefs::kGSSAPILibraryName, std::string()); registry->RegisterStringPref(prefs::kSpdyProxyAuthOrigin, std::string()); registry->RegisterBooleanPref(prefs::kEnableReferrers, true); registry->RegisterInt64Pref(prefs::kHttpReceivedContentLength, 0); registry->RegisterInt64Pref(prefs::kHttpOriginalContentLength, 0); #if defined(OS_ANDROID) || defined(OS_IOS) registry->RegisterListPref(prefs::kDailyHttpOriginalContentLength); registry->RegisterListPref(prefs::kDailyHttpReceivedContentLength); registry->RegisterListPref( prefs::kDailyOriginalContentLengthWithDataReductionProxyEnabled); registry->RegisterListPref( prefs::kDailyContentLengthWithDataReductionProxyEnabled); registry->RegisterListPref( prefs::kDailyOriginalContentLengthViaDataReductionProxy); registry->RegisterListPref( prefs::kDailyContentLengthViaDataReductionProxy); registry->RegisterInt64Pref(prefs::kDailyHttpContentLengthLastUpdateDate, 0L); #endif registry->RegisterBooleanPref(prefs::kBuiltInDnsClientEnabled, true); } Commit Message: Added daily UMA for non-data-reduction-proxy data usage when the proxy is enabled. BUG=325325 Review URL: https://codereview.chromium.org/106113002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@239897 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-416
void IOThread::RegisterPrefs(PrefRegistrySimple* registry) { registry->RegisterStringPref(prefs::kAuthSchemes, "basic,digest,ntlm,negotiate," "spdyproxy"); registry->RegisterBooleanPref(prefs::kDisableAuthNegotiateCnameLookup, false); registry->RegisterBooleanPref(prefs::kEnableAuthNegotiatePort, false); registry->RegisterStringPref(prefs::kAuthServerWhitelist, std::string()); registry->RegisterStringPref(prefs::kAuthNegotiateDelegateWhitelist, std::string()); registry->RegisterStringPref(prefs::kGSSAPILibraryName, std::string()); registry->RegisterStringPref(prefs::kSpdyProxyAuthOrigin, std::string()); registry->RegisterBooleanPref(prefs::kEnableReferrers, true); registry->RegisterInt64Pref(prefs::kHttpReceivedContentLength, 0); registry->RegisterInt64Pref(prefs::kHttpOriginalContentLength, 0); #if defined(OS_ANDROID) || defined(OS_IOS) registry->RegisterListPref(prefs::kDailyHttpOriginalContentLength); registry->RegisterListPref(prefs::kDailyHttpReceivedContentLength); registry->RegisterListPref( prefs::kDailyOriginalContentLengthWithDataReductionProxyEnabled); registry->RegisterListPref( prefs::kDailyContentLengthWithDataReductionProxyEnabled); registry->RegisterListPref( prefs::kDailyContentLengthHttpsWithDataReductionProxyEnabled); registry->RegisterListPref( prefs::kDailyContentLengthShortBypassWithDataReductionProxyEnabled); registry->RegisterListPref( prefs::kDailyContentLengthLongBypassWithDataReductionProxyEnabled); registry->RegisterListPref( prefs::kDailyContentLengthUnknownWithDataReductionProxyEnabled); registry->RegisterListPref( prefs::kDailyOriginalContentLengthViaDataReductionProxy); registry->RegisterListPref( prefs::kDailyContentLengthViaDataReductionProxy); registry->RegisterInt64Pref(prefs::kDailyHttpContentLengthLastUpdateDate, 0L); #endif registry->RegisterBooleanPref(prefs::kBuiltInDnsClientEnabled, true); }
171,320
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void CompileFromResponseCallback( const v8::FunctionCallbackInfo<v8::Value>& args) { ExceptionState exception_state(args.GetIsolate(), ExceptionState::kExecutionContext, "WebAssembly", "compile"); ExceptionToRejectPromiseScope reject_promise_scope(args, exception_state); ScriptState* script_state = ScriptState::ForRelevantRealm(args); if (!ExecutionContext::From(script_state)) { V8SetReturnValue(args, ScriptPromise().V8Value()); return; } if (args.Length() < 1 || !args[0]->IsObject() || !V8Response::hasInstance(args[0], args.GetIsolate())) { V8SetReturnValue( args, ScriptPromise::Reject( script_state, V8ThrowException::CreateTypeError( script_state->GetIsolate(), "An argument must be provided, which must be a" "Response or Promise<Response> object")) .V8Value()); return; } Response* response = V8Response::ToImpl(v8::Local<v8::Object>::Cast(args[0])); if (response->MimeType() != "application/wasm") { V8SetReturnValue( args, ScriptPromise::Reject( script_state, V8ThrowException::CreateTypeError( script_state->GetIsolate(), "Incorrect response MIME type. Expected 'application/wasm'.")) .V8Value()); return; } v8::Local<v8::Value> promise; if (response->IsBodyLocked() || response->bodyUsed()) { promise = ScriptPromise::Reject(script_state, V8ThrowException::CreateTypeError( script_state->GetIsolate(), "Cannot compile WebAssembly.Module " "from an already read Response")) .V8Value(); } else { if (response->BodyBuffer()) { FetchDataLoaderAsWasmModule* loader = new FetchDataLoaderAsWasmModule(script_state); promise = loader->GetPromise(); response->BodyBuffer()->StartLoading(loader, new WasmDataLoaderClient()); } else { promise = ScriptPromise::Reject(script_state, V8ThrowException::CreateTypeError( script_state->GetIsolate(), "Response object has a null body.")) .V8Value(); } } V8SetReturnValue(args, promise); } Commit Message: [wasm] Use correct bindings APIs Use ScriptState::ForCurrentRealm in static methods, instead of ForRelevantRealm(). Bug: chromium:788453 Change-Id: I63bd25e3f5a4e8d7cbaff945da8df0d71aa65527 Reviewed-on: https://chromium-review.googlesource.com/795096 Commit-Queue: Mircea Trofin <[email protected]> Reviewed-by: Yuki Shiino <[email protected]> Reviewed-by: Kentaro Hara <[email protected]> Cr-Commit-Position: refs/heads/master@{#520174} CWE ID: CWE-79
void CompileFromResponseCallback( const v8::FunctionCallbackInfo<v8::Value>& args) { ExceptionState exception_state(args.GetIsolate(), ExceptionState::kExecutionContext, "WebAssembly", "compile"); ExceptionToRejectPromiseScope reject_promise_scope(args, exception_state); ScriptState* script_state = ScriptState::ForCurrentRealm(args); if (!ExecutionContext::From(script_state)) { V8SetReturnValue(args, ScriptPromise().V8Value()); return; } if (args.Length() < 1 || !args[0]->IsObject() || !V8Response::hasInstance(args[0], args.GetIsolate())) { V8SetReturnValue( args, ScriptPromise::Reject( script_state, V8ThrowException::CreateTypeError( script_state->GetIsolate(), "An argument must be provided, which must be a " "Response or Promise<Response> object")) .V8Value()); return; } Response* response = V8Response::ToImpl(v8::Local<v8::Object>::Cast(args[0])); if (response->MimeType() != "application/wasm") { V8SetReturnValue( args, ScriptPromise::Reject( script_state, V8ThrowException::CreateTypeError( script_state->GetIsolate(), "Incorrect response MIME type. Expected 'application/wasm'.")) .V8Value()); return; } v8::Local<v8::Value> promise; if (response->IsBodyLocked() || response->bodyUsed()) { promise = ScriptPromise::Reject(script_state, V8ThrowException::CreateTypeError( script_state->GetIsolate(), "Cannot compile WebAssembly.Module " "from an already read Response")) .V8Value(); } else { if (response->BodyBuffer()) { FetchDataLoaderAsWasmModule* loader = new FetchDataLoaderAsWasmModule(script_state); promise = loader->GetPromise(); response->BodyBuffer()->StartLoading(loader, new WasmDataLoaderClient()); } else { promise = ScriptPromise::Reject(script_state, V8ThrowException::CreateTypeError( script_state->GetIsolate(), "Response object has a null body.")) .V8Value(); } } V8SetReturnValue(args, promise); }
172,938
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void OneClickSigninHelper::DidStopLoading( content::RenderViewHost* render_view_host) { content::WebContents* contents = web_contents(); const GURL url = contents->GetURL(); Profile* profile = Profile::FromBrowserContext(contents->GetBrowserContext()); VLOG(1) << "OneClickSigninHelper::DidStopLoading: url=" << url.spec(); if (!error_message_.empty() && auto_accept_ == AUTO_ACCEPT_EXPLICIT) { VLOG(1) << "OneClickSigninHelper::DidStopLoading: error=" << error_message_; RemoveCurrentHistoryItem(contents); Browser* browser = chrome::FindBrowserWithWebContents(contents); RedirectToNtpOrAppsPage(web_contents(), source_); ShowSigninErrorBubble(browser, error_message_); CleanTransientState(); return; } if (AreWeShowingSignin(url, source_, email_)) { if (!showing_signin_) { if (source_ == SyncPromoUI::SOURCE_UNKNOWN) LogOneClickHistogramValue(one_click_signin::HISTOGRAM_SHOWN); else LogHistogramValue(source_, one_click_signin::HISTOGRAM_SHOWN); } showing_signin_ = true; } GURL::Replacements replacements; replacements.ClearQuery(); const bool continue_url_match = ( continue_url_.is_valid() && url.ReplaceComponents(replacements) == continue_url_.ReplaceComponents(replacements)); if (continue_url_match) RemoveCurrentHistoryItem(contents); if (email_.empty()) { VLOG(1) << "OneClickSigninHelper::DidStopLoading: nothing to do"; if (continue_url_match && auto_accept_ == AUTO_ACCEPT_EXPLICIT) RedirectToSignin(); std::string unused_value; if (net::GetValueForKeyInQuery(url, "ntp", &unused_value)) { SyncPromoUI::SetUserSkippedSyncPromo(profile); RedirectToNtpOrAppsPage(web_contents(), source_); } if (!continue_url_match && !IsValidGaiaSigninRedirectOrResponseURL(url) && ++untrusted_navigations_since_signin_visit_ > kMaxNavigationsSince) { CleanTransientState(); } return; } bool force_same_tab_navigation = false; if (!continue_url_match && IsValidGaiaSigninRedirectOrResponseURL(url)) return; if (auto_accept_ == AUTO_ACCEPT_EXPLICIT) { DCHECK(source_ != SyncPromoUI::SOURCE_UNKNOWN); if (!continue_url_match) { VLOG(1) << "OneClickSigninHelper::DidStopLoading: invalid url='" << url.spec() << "' expected continue url=" << continue_url_; CleanTransientState(); return; } SyncPromoUI::Source source = SyncPromoUI::GetSourceForSyncPromoURL(url); if (source != source_) { original_source_ = source_; source_ = source; force_same_tab_navigation = source == SyncPromoUI::SOURCE_SETTINGS; switched_to_advanced_ = source == SyncPromoUI::SOURCE_SETTINGS; } } Browser* browser = chrome::FindBrowserWithWebContents(contents); VLOG(1) << "OneClickSigninHelper::DidStopLoading: signin is go." 
<< " auto_accept=" << auto_accept_ << " source=" << source_; switch (auto_accept_) { case AUTO_ACCEPT_NONE: if (showing_signin_) LogOneClickHistogramValue(one_click_signin::HISTOGRAM_DISMISSED); break; case AUTO_ACCEPT_ACCEPTED: LogOneClickHistogramValue(one_click_signin::HISTOGRAM_ACCEPTED); LogOneClickHistogramValue(one_click_signin::HISTOGRAM_WITH_DEFAULTS); SigninManager::DisableOneClickSignIn(profile); StartSync(StartSyncArgs(profile, browser, auto_accept_, session_index_, email_, password_, false /* force_same_tab_navigation */, true /* confirmation_required */, source_), OneClickSigninSyncStarter::SYNC_WITH_DEFAULT_SETTINGS); break; case AUTO_ACCEPT_CONFIGURE: LogOneClickHistogramValue(one_click_signin::HISTOGRAM_ACCEPTED); LogOneClickHistogramValue(one_click_signin::HISTOGRAM_WITH_ADVANCED); SigninManager::DisableOneClickSignIn(profile); StartSync( StartSyncArgs(profile, browser, auto_accept_, session_index_, email_, password_, false /* force_same_tab_navigation */, false /* confirmation_required */, source_), OneClickSigninSyncStarter::CONFIGURE_SYNC_FIRST); break; case AUTO_ACCEPT_EXPLICIT: { if (switched_to_advanced_) { LogHistogramValue(original_source_, one_click_signin::HISTOGRAM_WITH_ADVANCED); LogHistogramValue(original_source_, one_click_signin::HISTOGRAM_ACCEPTED); } else { LogHistogramValue(source_, one_click_signin::HISTOGRAM_ACCEPTED); LogHistogramValue(source_, one_click_signin::HISTOGRAM_WITH_DEFAULTS); } OneClickSigninSyncStarter::StartSyncMode start_mode = source_ == SyncPromoUI::SOURCE_SETTINGS ? OneClickSigninSyncStarter::CONFIGURE_SYNC_FIRST : OneClickSigninSyncStarter::SYNC_WITH_DEFAULT_SETTINGS; std::string last_email = profile->GetPrefs()->GetString(prefs::kGoogleServicesLastUsername); if (!last_email.empty() && !gaia::AreEmailsSame(last_email, email_)) { ConfirmEmailDialogDelegate::AskForConfirmation( contents, last_email, email_, base::Bind( &StartExplicitSync, StartSyncArgs(profile, browser, auto_accept_, session_index_, email_, password_, force_same_tab_navigation, false /* confirmation_required */, source_), contents, start_mode)); } else { StartSync( StartSyncArgs(profile, browser, auto_accept_, session_index_, email_, password_, force_same_tab_navigation, untrusted_confirmation_required_, source_), start_mode); RedirectToNtpOrAppsPageIfNecessary(web_contents(), source_); } if (source_ == SyncPromoUI::SOURCE_SETTINGS && SyncPromoUI::GetSourceForSyncPromoURL(continue_url_) == SyncPromoUI::SOURCE_WEBSTORE_INSTALL) { redirect_url_ = continue_url_; ProfileSyncService* sync_service = ProfileSyncServiceFactory::GetForProfile(profile); if (sync_service) sync_service->AddObserver(this); } break; } case AUTO_ACCEPT_REJECTED_FOR_PROFILE: AddEmailToOneClickRejectedList(profile, email_); LogOneClickHistogramValue(one_click_signin::HISTOGRAM_REJECTED); break; default: NOTREACHED() << "Invalid auto_accept=" << auto_accept_; break; } CleanTransientState(); } Commit Message: Display confirmation dialog for untrusted signins BUG=252062 Review URL: https://chromiumcodereview.appspot.com/17482002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@208520 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-200
void OneClickSigninHelper::DidStopLoading( content::RenderViewHost* render_view_host) { content::WebContents* contents = web_contents(); const GURL url = contents->GetURL(); Profile* profile = Profile::FromBrowserContext(contents->GetBrowserContext()); VLOG(1) << "OneClickSigninHelper::DidStopLoading: url=" << url.spec(); if (!error_message_.empty() && auto_accept_ == AUTO_ACCEPT_EXPLICIT) { VLOG(1) << "OneClickSigninHelper::DidStopLoading: error=" << error_message_; RemoveCurrentHistoryItem(contents); Browser* browser = chrome::FindBrowserWithWebContents(contents); RedirectToNtpOrAppsPage(web_contents(), source_); ShowSigninErrorBubble(browser, error_message_); CleanTransientState(); return; } if (AreWeShowingSignin(url, source_, email_)) { if (!showing_signin_) { if (source_ == SyncPromoUI::SOURCE_UNKNOWN) LogOneClickHistogramValue(one_click_signin::HISTOGRAM_SHOWN); else LogHistogramValue(source_, one_click_signin::HISTOGRAM_SHOWN); } showing_signin_ = true; } GURL::Replacements replacements; replacements.ClearQuery(); const bool continue_url_match = ( continue_url_.is_valid() && url.ReplaceComponents(replacements) == continue_url_.ReplaceComponents(replacements)); if (continue_url_match) RemoveCurrentHistoryItem(contents); if (email_.empty()) { VLOG(1) << "OneClickSigninHelper::DidStopLoading: nothing to do"; if (continue_url_match && auto_accept_ == AUTO_ACCEPT_EXPLICIT) RedirectToSignin(); std::string unused_value; if (net::GetValueForKeyInQuery(url, "ntp", &unused_value)) { SyncPromoUI::SetUserSkippedSyncPromo(profile); RedirectToNtpOrAppsPage(web_contents(), source_); } if (!continue_url_match && !IsValidGaiaSigninRedirectOrResponseURL(url) && ++untrusted_navigations_since_signin_visit_ > kMaxNavigationsSince) { CleanTransientState(); } return; } bool force_same_tab_navigation = false; if (!continue_url_match && IsValidGaiaSigninRedirectOrResponseURL(url)) return; if (auto_accept_ == AUTO_ACCEPT_EXPLICIT) { DCHECK(source_ != SyncPromoUI::SOURCE_UNKNOWN); if (!continue_url_match) { VLOG(1) << "OneClickSigninHelper::DidStopLoading: invalid url='" << url.spec() << "' expected continue url=" << continue_url_; CleanTransientState(); return; } SyncPromoUI::Source source = SyncPromoUI::GetSourceForSyncPromoURL(url); if (source != source_) { original_source_ = source_; source_ = source; force_same_tab_navigation = source == SyncPromoUI::SOURCE_SETTINGS; switched_to_advanced_ = source == SyncPromoUI::SOURCE_SETTINGS; } } Browser* browser = chrome::FindBrowserWithWebContents(contents); VLOG(1) << "OneClickSigninHelper::DidStopLoading: signin is go." << " auto_accept=" << auto_accept_ << " source=" << source_; switch (auto_accept_) { case AUTO_ACCEPT_NONE: if (showing_signin_) LogOneClickHistogramValue(one_click_signin::HISTOGRAM_DISMISSED); break; case AUTO_ACCEPT_ACCEPTED: LogOneClickHistogramValue(one_click_signin::HISTOGRAM_ACCEPTED); LogOneClickHistogramValue(one_click_signin::HISTOGRAM_WITH_DEFAULTS); SigninManager::DisableOneClickSignIn(profile); StartSync(StartSyncArgs(profile, browser, auto_accept_, session_index_, email_, password_, false /* force_same_tab_navigation */, true /* confirmation_required */, source_), OneClickSigninSyncStarter::SYNC_WITH_DEFAULT_SETTINGS); break; case AUTO_ACCEPT_CONFIGURE: LogOneClickHistogramValue(one_click_signin::HISTOGRAM_ACCEPTED); LogOneClickHistogramValue(one_click_signin::HISTOGRAM_WITH_ADVANCED); SigninManager::DisableOneClickSignIn(profile); // Display the extra confirmation (even in the SAML case) in case this // was an untrusted renderer. 
StartSync( StartSyncArgs(profile, browser, auto_accept_, session_index_, email_, password_, false /* force_same_tab_navigation */, true /* confirmation_required */, source_), OneClickSigninSyncStarter::CONFIGURE_SYNC_FIRST); break; case AUTO_ACCEPT_EXPLICIT: { if (switched_to_advanced_) { LogHistogramValue(original_source_, one_click_signin::HISTOGRAM_WITH_ADVANCED); LogHistogramValue(original_source_, one_click_signin::HISTOGRAM_ACCEPTED); } else { LogHistogramValue(source_, one_click_signin::HISTOGRAM_ACCEPTED); LogHistogramValue(source_, one_click_signin::HISTOGRAM_WITH_DEFAULTS); } OneClickSigninSyncStarter::StartSyncMode start_mode = source_ == SyncPromoUI::SOURCE_SETTINGS ? OneClickSigninSyncStarter::CONFIGURE_SYNC_FIRST : OneClickSigninSyncStarter::SYNC_WITH_DEFAULT_SETTINGS; std::string last_email = profile->GetPrefs()->GetString(prefs::kGoogleServicesLastUsername); if (!last_email.empty() && !gaia::AreEmailsSame(last_email, email_)) { ConfirmEmailDialogDelegate::AskForConfirmation( contents, last_email, email_, base::Bind( &StartExplicitSync, StartSyncArgs(profile, browser, auto_accept_, session_index_, email_, password_, force_same_tab_navigation, false /* confirmation_required */, source_), contents, start_mode)); } else { StartSync( StartSyncArgs(profile, browser, auto_accept_, session_index_, email_, password_, force_same_tab_navigation, untrusted_confirmation_required_, source_), start_mode); RedirectToNtpOrAppsPageIfNecessary(web_contents(), source_); } if (source_ == SyncPromoUI::SOURCE_SETTINGS && SyncPromoUI::GetSourceForSyncPromoURL(continue_url_) == SyncPromoUI::SOURCE_WEBSTORE_INSTALL) { redirect_url_ = continue_url_; ProfileSyncService* sync_service = ProfileSyncServiceFactory::GetForProfile(profile); if (sync_service) sync_service->AddObserver(this); } break; } case AUTO_ACCEPT_REJECTED_FOR_PROFILE: AddEmailToOneClickRejectedList(profile, email_); LogOneClickHistogramValue(one_click_signin::HISTOGRAM_REJECTED); break; default: NOTREACHED() << "Invalid auto_accept=" << auto_accept_; break; } CleanTransientState(); }
171,243
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: mfr_print(netdissect_options *ndo, register const u_char *p, u_int length) { u_int tlen,idx,hdr_len = 0; uint16_t sequence_num; uint8_t ie_type,ie_len; const uint8_t *tptr; /* * FRF.16 Link Integrity Control Frame * * 7 6 5 4 3 2 1 0 * +----+----+----+----+----+----+----+----+ * | B | E | C=1| 0 0 0 0 | EA | * +----+----+----+----+----+----+----+----+ * | 0 0 0 0 0 0 0 0 | * +----+----+----+----+----+----+----+----+ * | message type | * +----+----+----+----+----+----+----+----+ */ ND_TCHECK2(*p, 4); /* minimum frame header length */ if ((p[0] & MFR_BEC_MASK) == MFR_CTRL_FRAME && p[1] == 0) { ND_PRINT((ndo, "FRF.16 Control, Flags [%s], %s, length %u", bittok2str(frf_flag_values,"none",(p[0] & MFR_BEC_MASK)), tok2str(mfr_ctrl_msg_values,"Unknown Message (0x%02x)",p[2]), length)); tptr = p + 3; tlen = length -3; hdr_len = 3; if (!ndo->ndo_vflag) return hdr_len; while (tlen>sizeof(struct ie_tlv_header_t)) { ND_TCHECK2(*tptr, sizeof(struct ie_tlv_header_t)); ie_type=tptr[0]; ie_len=tptr[1]; ND_PRINT((ndo, "\n\tIE %s (%u), length %u: ", tok2str(mfr_ctrl_ie_values,"Unknown",ie_type), ie_type, ie_len)); /* infinite loop check */ if (ie_type == 0 || ie_len <= sizeof(struct ie_tlv_header_t)) return hdr_len; ND_TCHECK2(*tptr, ie_len); tptr+=sizeof(struct ie_tlv_header_t); /* tlv len includes header */ ie_len-=sizeof(struct ie_tlv_header_t); tlen-=sizeof(struct ie_tlv_header_t); switch (ie_type) { case MFR_CTRL_IE_MAGIC_NUM: ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(tptr))); break; case MFR_CTRL_IE_BUNDLE_ID: /* same message format */ case MFR_CTRL_IE_LINK_ID: for (idx = 0; idx < ie_len && idx < MFR_ID_STRING_MAXLEN; idx++) { if (*(tptr+idx) != 0) /* don't print null termination */ safeputchar(ndo, *(tptr + idx)); else break; } break; case MFR_CTRL_IE_TIMESTAMP: if (ie_len == sizeof(struct timeval)) { ts_print(ndo, (const struct timeval *)tptr); break; } /* fall through and hexdump if no unix timestamp */ /* * FIXME those are the defined IEs that lack a decoder * you are welcome to contribute code ;-) */ case MFR_CTRL_IE_VENDOR_EXT: case MFR_CTRL_IE_CAUSE: default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, tptr, "\n\t ", ie_len); break; } /* do we want to see a hexdump of the IE ? */ if (ndo->ndo_vflag > 1 ) print_unknown_data(ndo, tptr, "\n\t ", ie_len); tlen-=ie_len; tptr+=ie_len; } return hdr_len; } /* * FRF.16 Fragmentation Frame * * 7 6 5 4 3 2 1 0 * +----+----+----+----+----+----+----+----+ * | B | E | C=0|seq. (high 4 bits) | EA | * +----+----+----+----+----+----+----+----+ * | sequence (low 8 bits) | * +----+----+----+----+----+----+----+----+ * | DLCI (6 bits) | CR | EA | * +----+----+----+----+----+----+----+----+ * | DLCI (4 bits) |FECN|BECN| DE | EA | * +----+----+----+----+----+----+----+----+ */ sequence_num = (p[0]&0x1e)<<7 | p[1]; /* whole packet or first fragment ? */ if ((p[0] & MFR_BEC_MASK) == MFR_FRAG_FRAME || (p[0] & MFR_BEC_MASK) == MFR_B_BIT) { ND_PRINT((ndo, "FRF.16 Frag, seq %u, Flags [%s], ", sequence_num, bittok2str(frf_flag_values,"none",(p[0] & MFR_BEC_MASK)))); hdr_len = 2; fr_print(ndo, p+hdr_len,length-hdr_len); return hdr_len; } /* must be a middle or the last fragment */ ND_PRINT((ndo, "FRF.16 Frag, seq %u, Flags [%s]", sequence_num, bittok2str(frf_flag_values,"none",(p[0] & MFR_BEC_MASK)))); print_unknown_data(ndo, p, "\n\t", length); return hdr_len; trunc: ND_PRINT((ndo, "[|mfr]")); return length; } Commit Message: (for 4.9.3) CVE-2018-14468/FRF.16: Add a missing length check. 
The specification says in a well-formed Magic Number information element the data is exactly 4 bytes long. In mfr_print() check this before trying to read those 4 bytes. This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s). CWE ID: CWE-125
mfr_print(netdissect_options *ndo, register const u_char *p, u_int length) { u_int tlen,idx,hdr_len = 0; uint16_t sequence_num; uint8_t ie_type,ie_len; const uint8_t *tptr; /* * FRF.16 Link Integrity Control Frame * * 7 6 5 4 3 2 1 0 * +----+----+----+----+----+----+----+----+ * | B | E | C=1| 0 0 0 0 | EA | * +----+----+----+----+----+----+----+----+ * | 0 0 0 0 0 0 0 0 | * +----+----+----+----+----+----+----+----+ * | message type | * +----+----+----+----+----+----+----+----+ */ ND_TCHECK2(*p, 4); /* minimum frame header length */ if ((p[0] & MFR_BEC_MASK) == MFR_CTRL_FRAME && p[1] == 0) { ND_PRINT((ndo, "FRF.16 Control, Flags [%s], %s, length %u", bittok2str(frf_flag_values,"none",(p[0] & MFR_BEC_MASK)), tok2str(mfr_ctrl_msg_values,"Unknown Message (0x%02x)",p[2]), length)); tptr = p + 3; tlen = length -3; hdr_len = 3; if (!ndo->ndo_vflag) return hdr_len; while (tlen>sizeof(struct ie_tlv_header_t)) { ND_TCHECK2(*tptr, sizeof(struct ie_tlv_header_t)); ie_type=tptr[0]; ie_len=tptr[1]; ND_PRINT((ndo, "\n\tIE %s (%u), length %u: ", tok2str(mfr_ctrl_ie_values,"Unknown",ie_type), ie_type, ie_len)); /* infinite loop check */ if (ie_type == 0 || ie_len <= sizeof(struct ie_tlv_header_t)) return hdr_len; ND_TCHECK2(*tptr, ie_len); tptr+=sizeof(struct ie_tlv_header_t); /* tlv len includes header */ ie_len-=sizeof(struct ie_tlv_header_t); tlen-=sizeof(struct ie_tlv_header_t); switch (ie_type) { case MFR_CTRL_IE_MAGIC_NUM: /* FRF.16.1 Section 3.4.3 Magic Number Information Element */ if (ie_len != 4) { ND_PRINT((ndo, "(invalid length)")); break; } ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(tptr))); break; case MFR_CTRL_IE_BUNDLE_ID: /* same message format */ case MFR_CTRL_IE_LINK_ID: for (idx = 0; idx < ie_len && idx < MFR_ID_STRING_MAXLEN; idx++) { if (*(tptr+idx) != 0) /* don't print null termination */ safeputchar(ndo, *(tptr + idx)); else break; } break; case MFR_CTRL_IE_TIMESTAMP: if (ie_len == sizeof(struct timeval)) { ts_print(ndo, (const struct timeval *)tptr); break; } /* fall through and hexdump if no unix timestamp */ /* * FIXME those are the defined IEs that lack a decoder * you are welcome to contribute code ;-) */ case MFR_CTRL_IE_VENDOR_EXT: case MFR_CTRL_IE_CAUSE: default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, tptr, "\n\t ", ie_len); break; } /* do we want to see a hexdump of the IE ? */ if (ndo->ndo_vflag > 1 ) print_unknown_data(ndo, tptr, "\n\t ", ie_len); tlen-=ie_len; tptr+=ie_len; } return hdr_len; } /* * FRF.16 Fragmentation Frame * * 7 6 5 4 3 2 1 0 * +----+----+----+----+----+----+----+----+ * | B | E | C=0|seq. (high 4 bits) | EA | * +----+----+----+----+----+----+----+----+ * | sequence (low 8 bits) | * +----+----+----+----+----+----+----+----+ * | DLCI (6 bits) | CR | EA | * +----+----+----+----+----+----+----+----+ * | DLCI (4 bits) |FECN|BECN| DE | EA | * +----+----+----+----+----+----+----+----+ */ sequence_num = (p[0]&0x1e)<<7 | p[1]; /* whole packet or first fragment ? */ if ((p[0] & MFR_BEC_MASK) == MFR_FRAG_FRAME || (p[0] & MFR_BEC_MASK) == MFR_B_BIT) { ND_PRINT((ndo, "FRF.16 Frag, seq %u, Flags [%s], ", sequence_num, bittok2str(frf_flag_values,"none",(p[0] & MFR_BEC_MASK)))); hdr_len = 2; fr_print(ndo, p+hdr_len,length-hdr_len); return hdr_len; } /* must be a middle or the last fragment */ ND_PRINT((ndo, "FRF.16 Frag, seq %u, Flags [%s]", sequence_num, bittok2str(frf_flag_values,"none",(p[0] & MFR_BEC_MASK)))); print_unknown_data(ndo, p, "\n\t", length); return hdr_len; trunc: ND_PRINT((ndo, "[|mfr]")); return length; }
169,843
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static JSValueRef updateTouchPointCallback(JSContextRef context, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) { if (argumentCount < 3) return JSValueMakeUndefined(context); int index = static_cast<int>(JSValueToNumber(context, arguments[0], exception)); ASSERT(!exception || !*exception); int x = static_cast<int>(JSValueToNumber(context, arguments[1], exception)); ASSERT(!exception || !*exception); int y = static_cast<int>(JSValueToNumber(context, arguments[2], exception)); ASSERT(!exception || !*exception); if (index < 0 || index >= (int)touches.size()) return JSValueMakeUndefined(context); BlackBerry::Platform::TouchPoint& touch = touches[index]; IntPoint pos(x, y); touch.m_pos = pos; touch.m_screenPos = pos; touch.m_state = BlackBerry::Platform::TouchPoint::TouchMoved; return JSValueMakeUndefined(context); } Commit Message: [BlackBerry] Adapt to new BlackBerry::Platform::TouchPoint API https://bugs.webkit.org/show_bug.cgi?id=105143 RIM PR 171941 Reviewed by Rob Buis. Internally reviewed by George Staikos. Source/WebCore: TouchPoint instances now provide document coordinates for the viewport and content position of the touch event. The pixel coordinates stored in the TouchPoint should no longer be needed in WebKit. Also adapt to new method names and encapsulation of TouchPoint data members. No change in behavior, no new tests. * platform/blackberry/PlatformTouchPointBlackBerry.cpp: (WebCore::PlatformTouchPoint::PlatformTouchPoint): Source/WebKit/blackberry: TouchPoint instances now provide document coordinates for the viewport and content position of the touch event. The pixel coordinates stored in the TouchPoint should no longer be needed in WebKit. One exception is when passing events to a full screen plugin. Also adapt to new method names and encapsulation of TouchPoint data members. * Api/WebPage.cpp: (BlackBerry::WebKit::WebPage::touchEvent): (BlackBerry::WebKit::WebPage::touchPointAsMouseEvent): (BlackBerry::WebKit::WebPagePrivate::dispatchTouchEventToFullScreenPlugin): (BlackBerry::WebKit::WebPagePrivate::dispatchTouchPointAsMouseEventToFullScreenPlugin): * WebKitSupport/InputHandler.cpp: (BlackBerry::WebKit::InputHandler::shouldRequestSpellCheckingOptionsForPoint): * WebKitSupport/InputHandler.h: (InputHandler): * WebKitSupport/TouchEventHandler.cpp: (BlackBerry::WebKit::TouchEventHandler::doFatFingers): (BlackBerry::WebKit::TouchEventHandler::handleTouchPoint): * WebKitSupport/TouchEventHandler.h: (TouchEventHandler): Tools: Adapt to new method names and encapsulation of TouchPoint data members. * DumpRenderTree/blackberry/EventSender.cpp: (addTouchPointCallback): (updateTouchPointCallback): (touchEndCallback): (releaseTouchPointCallback): (sendTouchEvent): git-svn-id: svn://svn.chromium.org/blink/trunk@137880 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID:
static JSValueRef updateTouchPointCallback(JSContextRef context, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) { if (argumentCount < 3) return JSValueMakeUndefined(context); int index = static_cast<int>(JSValueToNumber(context, arguments[0], exception)); ASSERT(!exception || !*exception); int x = static_cast<int>(JSValueToNumber(context, arguments[1], exception)); ASSERT(!exception || !*exception); int y = static_cast<int>(JSValueToNumber(context, arguments[2], exception)); ASSERT(!exception || !*exception); if (index < 0 || index >= (int)touches.size()) return JSValueMakeUndefined(context); BlackBerry::Platform::TouchPoint& touch = touches[index]; // pixelViewportPosition is unused in the WebKit layer IntPoint pos(x, y); // Unfortunately we don't know the scroll position at this point, so use pos for the content position too. // This assumes scroll position is 0,0 touch.populateDocumentPosition(pos, pos); touch.setScreenPosition(pos); touch.updateState(BlackBerry::Platform::TouchPoint::TouchMoved); return JSValueMakeUndefined(context); }
170,775
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, char __user *buf, size_t len, bool compat) { struct file *file = req->ki_filp; ssize_t ret; unsigned long nr_segs; int rw; fmode_t mode; aio_rw_op *rw_op; rw_iter_op *iter_op; struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct iov_iter iter; switch (opcode) { case IOCB_CMD_PREAD: case IOCB_CMD_PREADV: mode = FMODE_READ; rw = READ; rw_op = file->f_op->aio_read; iter_op = file->f_op->read_iter; goto rw_common; case IOCB_CMD_PWRITE: case IOCB_CMD_PWRITEV: mode = FMODE_WRITE; rw = WRITE; rw_op = file->f_op->aio_write; iter_op = file->f_op->write_iter; goto rw_common; rw_common: if (unlikely(!(file->f_mode & mode))) return -EBADF; if (!rw_op && !iter_op) return -EINVAL; if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV) ret = aio_setup_vectored_rw(req, rw, buf, &nr_segs, &len, &iovec, compat); else ret = aio_setup_single_vector(req, rw, buf, &nr_segs, len, iovec); if (!ret) ret = rw_verify_area(rw, file, &req->ki_pos, len); if (ret < 0) { if (iovec != inline_vecs) kfree(iovec); return ret; } len = ret; /* XXX: move/kill - rw_verify_area()? */ /* This matches the pread()/pwrite() logic */ if (req->ki_pos < 0) { ret = -EINVAL; break; } if (rw == WRITE) file_start_write(file); if (iter_op) { iov_iter_init(&iter, rw, iovec, nr_segs, len); ret = iter_op(req, &iter); } else { ret = rw_op(req, iovec, nr_segs, req->ki_pos); } if (rw == WRITE) file_end_write(file); break; case IOCB_CMD_FDSYNC: if (!file->f_op->aio_fsync) return -EINVAL; ret = file->f_op->aio_fsync(req, 1); break; case IOCB_CMD_FSYNC: if (!file->f_op->aio_fsync) return -EINVAL; ret = file->f_op->aio_fsync(req, 0); break; default: pr_debug("EINVAL: no operation provided\n"); return -EINVAL; } if (iovec != inline_vecs) kfree(iovec); if (ret != -EIOCBQUEUED) { /* * There's no easy way to restart the syscall since other AIO's * may be already running. Just fail this IO with EINTR. */ if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK)) ret = -EINTR; aio_complete(req, ret, 0); } return 0; } Commit Message: aio: lift iov_iter_init() into aio_setup_..._rw() the only non-trivial detail is that we do it before rw_verify_area(), so we'd better cap the length ourselves in aio_setup_single_rw() case (for vectored case rw_copy_check_uvector() will do that for us). Signed-off-by: Al Viro <[email protected]> CWE ID:
static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, char __user *buf, size_t len, bool compat) { struct file *file = req->ki_filp; ssize_t ret; unsigned long nr_segs; int rw; fmode_t mode; aio_rw_op *rw_op; rw_iter_op *iter_op; struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct iov_iter iter; switch (opcode) { case IOCB_CMD_PREAD: case IOCB_CMD_PREADV: mode = FMODE_READ; rw = READ; rw_op = file->f_op->aio_read; iter_op = file->f_op->read_iter; goto rw_common; case IOCB_CMD_PWRITE: case IOCB_CMD_PWRITEV: mode = FMODE_WRITE; rw = WRITE; rw_op = file->f_op->aio_write; iter_op = file->f_op->write_iter; goto rw_common; rw_common: if (unlikely(!(file->f_mode & mode))) return -EBADF; if (!rw_op && !iter_op) return -EINVAL; if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV) ret = aio_setup_vectored_rw(req, rw, buf, &nr_segs, &len, &iovec, compat, &iter); else ret = aio_setup_single_vector(req, rw, buf, &nr_segs, len, iovec, &iter); if (!ret) ret = rw_verify_area(rw, file, &req->ki_pos, len); if (ret < 0) { if (iovec != inline_vecs) kfree(iovec); return ret; } len = ret; /* XXX: move/kill - rw_verify_area()? */ /* This matches the pread()/pwrite() logic */ if (req->ki_pos < 0) { ret = -EINVAL; break; } if (rw == WRITE) file_start_write(file); if (iter_op) { ret = iter_op(req, &iter); } else { ret = rw_op(req, iter.iov, iter.nr_segs, req->ki_pos); } if (rw == WRITE) file_end_write(file); break; case IOCB_CMD_FDSYNC: if (!file->f_op->aio_fsync) return -EINVAL; ret = file->f_op->aio_fsync(req, 1); break; case IOCB_CMD_FSYNC: if (!file->f_op->aio_fsync) return -EINVAL; ret = file->f_op->aio_fsync(req, 0); break; default: pr_debug("EINVAL: no operation provided\n"); return -EINVAL; } if (iovec != inline_vecs) kfree(iovec); if (ret != -EIOCBQUEUED) { /* * There's no easy way to restart the syscall since other AIO's * may be already running. Just fail this IO with EINTR. */ if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK)) ret = -EINTR; aio_complete(req, ret, 0); } return 0; }
170,001
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void copyTrespass( short * /* dst */, const int *const * /* src */, unsigned /* nSamples */, unsigned /* nChannels */) { TRESPASS(); } Commit Message: FLACExtractor: copy protect mWriteBuffer Bug: 30895578 Change-Id: I4cba36bbe3502678210e5925181683df9726b431 CWE ID: CWE-119
static void copyTrespass( short * /* dst */, const int *[FLACParser::kMaxChannels] /* src */, unsigned /* nSamples */, unsigned /* nChannels */) { TRESPASS(); }
174,024
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: check(FILE *fp, int argc, const char **argv, png_uint_32p flags/*out*/, display *d, int set_callback) { int i, npasses, ipass; png_uint_32 height; d->keep = PNG_HANDLE_CHUNK_AS_DEFAULT; d->before_IDAT = 0; d->after_IDAT = 0; /* Some of these errors are permanently fatal and cause an exit here, others * are per-test and cause an error return. */ d->png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, d, error, warning); if (d->png_ptr == NULL) { fprintf(stderr, "%s(%s): could not allocate png struct\n", d->file, d->test); /* Terminate here, this error is not test specific. */ exit(1); } d->info_ptr = png_create_info_struct(d->png_ptr); d->end_ptr = png_create_info_struct(d->png_ptr); if (d->info_ptr == NULL || d->end_ptr == NULL) { fprintf(stderr, "%s(%s): could not allocate png info\n", d->file, d->test); clean_display(d); exit(1); } png_init_io(d->png_ptr, fp); # ifdef PNG_READ_USER_CHUNKS_SUPPORTED /* This is only done if requested by the caller; it interferes with the * standard store/save mechanism. */ if (set_callback) png_set_read_user_chunk_fn(d->png_ptr, d, read_callback); # else UNUSED(set_callback) # endif /* Handle each argument in turn; multiple settings are possible for the same * chunk and multiple calls will occur (the last one should override all * preceding ones). */ for (i=0; i<argc; ++i) { const char *equals = strchr(argv[i], '='); if (equals != NULL) { int chunk, option; if (strcmp(equals+1, "default") == 0) option = PNG_HANDLE_CHUNK_AS_DEFAULT; else if (strcmp(equals+1, "discard") == 0) option = PNG_HANDLE_CHUNK_NEVER; else if (strcmp(equals+1, "if-safe") == 0) option = PNG_HANDLE_CHUNK_IF_SAFE; else if (strcmp(equals+1, "save") == 0) option = PNG_HANDLE_CHUNK_ALWAYS; else { fprintf(stderr, "%s(%s): %s: unrecognized chunk option\n", d->file, d->test, argv[i]); display_exit(d); } switch (equals - argv[i]) { case 4: /* chunk name */ chunk = find(argv[i]); if (chunk >= 0) { /* These #if tests have the effect of skipping the arguments * if SAVE support is unavailable - we can't do a useful test * in this case, so we just check the arguments! This could * be improved in the future by using the read callback. */ png_byte name[5]; memcpy(name, chunk_info[chunk].name, 5); png_set_keep_unknown_chunks(d->png_ptr, option, name, 1); chunk_info[chunk].keep = option; continue; } break; case 7: /* default */ if (memcmp(argv[i], "default", 7) == 0) { png_set_keep_unknown_chunks(d->png_ptr, option, NULL, 0); d->keep = option; continue; } break; case 3: /* all */ if (memcmp(argv[i], "all", 3) == 0) { png_set_keep_unknown_chunks(d->png_ptr, option, NULL, -1); d->keep = option; for (chunk = 0; chunk < NINFO; ++chunk) if (chunk_info[chunk].all) chunk_info[chunk].keep = option; continue; } break; default: /* some misplaced = */ break; } } fprintf(stderr, "%s(%s): %s: unrecognized chunk argument\n", d->file, d->test, argv[i]); display_exit(d); } png_read_info(d->png_ptr, d->info_ptr); switch (png_get_interlace_type(d->png_ptr, d->info_ptr)) { case PNG_INTERLACE_NONE: npasses = 1; break; case PNG_INTERLACE_ADAM7: npasses = PNG_INTERLACE_ADAM7_PASSES; break; default: /* Hard error because it is not test specific */ fprintf(stderr, "%s(%s): invalid interlace type\n", d->file, d->test); clean_display(d); exit(1); } /* Skip the image data, if IDAT is not being handled then don't do this * because it will cause a CRC error. 
*/ if (chunk_info[0/*IDAT*/].keep == PNG_HANDLE_CHUNK_AS_DEFAULT) { png_start_read_image(d->png_ptr); height = png_get_image_height(d->png_ptr, d->info_ptr); if (npasses > 1) { png_uint_32 width = png_get_image_width(d->png_ptr, d->info_ptr); for (ipass=0; ipass<npasses; ++ipass) { png_uint_32 wPass = PNG_PASS_COLS(width, ipass); if (wPass > 0) { png_uint_32 y; for (y=0; y<height; ++y) if (PNG_ROW_IN_INTERLACE_PASS(y, ipass)) png_read_row(d->png_ptr, NULL, NULL); } } } /* interlaced */ else /* not interlaced */ { png_uint_32 y; for (y=0; y<height; ++y) png_read_row(d->png_ptr, NULL, NULL); } } png_read_end(d->png_ptr, d->end_ptr); flags[0] = get_valid(d, d->info_ptr); flags[1] = get_unknown(d, d->info_ptr, 0/*before IDAT*/); /* Only png_read_png sets PNG_INFO_IDAT! */ flags[chunk_info[0/*IDAT*/].keep != PNG_HANDLE_CHUNK_AS_DEFAULT] |= PNG_INFO_IDAT; flags[2] = get_valid(d, d->end_ptr); flags[3] = get_unknown(d, d->end_ptr, 1/*after IDAT*/); clean_display(d); return d->keep; } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
check(FILE *fp, int argc, const char **argv, png_uint_32p flags/*out*/, display *d, int set_callback) { int i, npasses, ipass; png_uint_32 height; d->keep = PNG_HANDLE_CHUNK_AS_DEFAULT; d->before_IDAT = 0; d->after_IDAT = 0; /* Some of these errors are permanently fatal and cause an exit here, others * are per-test and cause an error return. */ d->png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, d, error, warning); if (d->png_ptr == NULL) { fprintf(stderr, "%s(%s): could not allocate png struct\n", d->file, d->test); /* Terminate here, this error is not test specific. */ exit(1); } d->info_ptr = png_create_info_struct(d->png_ptr); d->end_ptr = png_create_info_struct(d->png_ptr); if (d->info_ptr == NULL || d->end_ptr == NULL) { fprintf(stderr, "%s(%s): could not allocate png info\n", d->file, d->test); clean_display(d); exit(1); } png_init_io(d->png_ptr, fp); # ifdef PNG_READ_USER_CHUNKS_SUPPORTED /* This is only done if requested by the caller; it interferes with the * standard store/save mechanism. */ if (set_callback) png_set_read_user_chunk_fn(d->png_ptr, d, read_callback); # else UNUSED(set_callback) # endif /* Handle each argument in turn; multiple settings are possible for the same * chunk and multiple calls will occur (the last one should override all * preceding ones). */ for (i=0; i<argc; ++i) { const char *equals = strchr(argv[i], '='); if (equals != NULL) { int chunk, option; if (strcmp(equals+1, "default") == 0) option = PNG_HANDLE_CHUNK_AS_DEFAULT; else if (strcmp(equals+1, "discard") == 0) option = PNG_HANDLE_CHUNK_NEVER; else if (strcmp(equals+1, "if-safe") == 0) option = PNG_HANDLE_CHUNK_IF_SAFE; else if (strcmp(equals+1, "save") == 0) option = PNG_HANDLE_CHUNK_ALWAYS; else { fprintf(stderr, "%s(%s): %s: unrecognized chunk option\n", d->file, d->test, argv[i]); display_exit(d); } switch (equals - argv[i]) { case 4: /* chunk name */ chunk = find(argv[i]); if (chunk >= 0) { /* These #if tests have the effect of skipping the arguments * if SAVE support is unavailable - we can't do a useful test * in this case, so we just check the arguments! This could * be improved in the future by using the read callback. 
*/ # if PNG_LIBPNG_VER >= 10700 &&\ !defined(PNG_SAVE_UNKNOWN_CHUNKS_SUPPORTED) if (option < PNG_HANDLE_CHUNK_IF_SAFE) # endif /* 1.7+ SAVE_UNKNOWN_CHUNKS */ { png_byte name[5]; memcpy(name, chunk_info[chunk].name, 5); png_set_keep_unknown_chunks(d->png_ptr, option, name, 1); chunk_info[chunk].keep = option; } continue; } break; case 7: /* default */ if (memcmp(argv[i], "default", 7) == 0) { # if PNG_LIBPNG_VER >= 10700 &&\ !defined(PNG_SAVE_UNKNOWN_CHUNKS_SUPPORTED) if (option < PNG_HANDLE_CHUNK_IF_SAFE) # endif /* 1.7+ SAVE_UNKNOWN_CHUNKS */ png_set_keep_unknown_chunks(d->png_ptr, option, NULL, 0); d->keep = option; continue; } break; case 3: /* all */ if (memcmp(argv[i], "all", 3) == 0) { # if PNG_LIBPNG_VER >= 10700 &&\ !defined(PNG_SAVE_UNKNOWN_CHUNKS_SUPPORTED) if (option < PNG_HANDLE_CHUNK_IF_SAFE) # endif /* 1.7+ SAVE_UNKNOWN_CHUNKS */ png_set_keep_unknown_chunks(d->png_ptr, option, NULL, -1); d->keep = option; for (chunk = 0; chunk < NINFO; ++chunk) if (chunk_info[chunk].all) chunk_info[chunk].keep = option; continue; } break; default: /* some misplaced = */ break; } } fprintf(stderr, "%s(%s): %s: unrecognized chunk argument\n", d->file, d->test, argv[i]); display_exit(d); } png_read_info(d->png_ptr, d->info_ptr); switch (png_get_interlace_type(d->png_ptr, d->info_ptr)) { case PNG_INTERLACE_NONE: npasses = 1; break; case PNG_INTERLACE_ADAM7: npasses = PNG_INTERLACE_ADAM7_PASSES; break; default: /* Hard error because it is not test specific */ fprintf(stderr, "%s(%s): invalid interlace type\n", d->file, d->test); clean_display(d); exit(1); } /* Skip the image data, if IDAT is not being handled then don't do this * because it will cause a CRC error. */ if (chunk_info[0/*IDAT*/].keep == PNG_HANDLE_CHUNK_AS_DEFAULT) { png_start_read_image(d->png_ptr); height = png_get_image_height(d->png_ptr, d->info_ptr); if (npasses > 1) { png_uint_32 width = png_get_image_width(d->png_ptr, d->info_ptr); for (ipass=0; ipass<npasses; ++ipass) { png_uint_32 wPass = PNG_PASS_COLS(width, ipass); if (wPass > 0) { png_uint_32 y; for (y=0; y<height; ++y) if (PNG_ROW_IN_INTERLACE_PASS(y, ipass)) png_read_row(d->png_ptr, NULL, NULL); } } } /* interlaced */ else /* not interlaced */ { png_uint_32 y; for (y=0; y<height; ++y) png_read_row(d->png_ptr, NULL, NULL); } } png_read_end(d->png_ptr, d->end_ptr); flags[0] = get_valid(d, d->info_ptr); flags[1] = get_unknown(d, d->info_ptr, 0/*before IDAT*/); /* Only png_read_png sets PNG_INFO_IDAT! */ flags[chunk_info[0/*IDAT*/].keep != PNG_HANDLE_CHUNK_AS_DEFAULT] |= PNG_INFO_IDAT; flags[2] = get_valid(d, d->end_ptr); flags[3] = get_unknown(d, d->end_ptr, 1/*after IDAT*/); clean_display(d); return d->keep; }
173,598
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int ovl_rename2(struct inode *olddir, struct dentry *old, struct inode *newdir, struct dentry *new, unsigned int flags) { int err; enum ovl_path_type old_type; enum ovl_path_type new_type; struct dentry *old_upperdir; struct dentry *new_upperdir; struct dentry *olddentry; struct dentry *newdentry; struct dentry *trap; bool old_opaque; bool new_opaque; bool new_create = false; bool cleanup_whiteout = false; bool overwrite = !(flags & RENAME_EXCHANGE); bool is_dir = d_is_dir(old); bool new_is_dir = false; struct dentry *opaquedir = NULL; const struct cred *old_cred = NULL; struct cred *override_cred = NULL; err = -EINVAL; if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE)) goto out; flags &= ~RENAME_NOREPLACE; err = ovl_check_sticky(old); if (err) goto out; /* Don't copy up directory trees */ old_type = ovl_path_type(old); err = -EXDEV; if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir) goto out; if (new->d_inode) { err = ovl_check_sticky(new); if (err) goto out; if (d_is_dir(new)) new_is_dir = true; new_type = ovl_path_type(new); err = -EXDEV; if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) goto out; err = 0; if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) { if (ovl_dentry_lower(old)->d_inode == ovl_dentry_lower(new)->d_inode) goto out; } if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) { if (ovl_dentry_upper(old)->d_inode == ovl_dentry_upper(new)->d_inode) goto out; } } else { if (ovl_dentry_is_opaque(new)) new_type = __OVL_PATH_UPPER; else new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE; } err = ovl_want_write(old); if (err) goto out; err = ovl_copy_up(old); if (err) goto out_drop_write; err = ovl_copy_up(new->d_parent); if (err) goto out_drop_write; if (!overwrite) { err = ovl_copy_up(new); if (err) goto out_drop_write; } old_opaque = !OVL_TYPE_PURE_UPPER(old_type); new_opaque = !OVL_TYPE_PURE_UPPER(new_type); if (old_opaque || new_opaque) { err = -ENOMEM; override_cred = prepare_creds(); if (!override_cred) goto out_drop_write; /* * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir * CAP_DAC_OVERRIDE for create in workdir * CAP_FOWNER for removing whiteout from sticky dir * CAP_FSETID for chmod of opaque dir * CAP_CHOWN for chown of opaque dir */ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); cap_raise(override_cred->cap_effective, CAP_FOWNER); cap_raise(override_cred->cap_effective, CAP_FSETID); cap_raise(override_cred->cap_effective, CAP_CHOWN); old_cred = override_creds(override_cred); } if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) { opaquedir = ovl_check_empty_and_clear(new); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) { opaquedir = NULL; goto out_revert_creds; } } if (overwrite) { if (old_opaque) { if (new->d_inode || !new_opaque) { /* Whiteout source */ flags |= RENAME_WHITEOUT; } else { /* Switch whiteouts */ flags |= RENAME_EXCHANGE; } } else if (is_dir && !new->d_inode && new_opaque) { flags |= RENAME_EXCHANGE; cleanup_whiteout = true; } } old_upperdir = ovl_dentry_upper(old->d_parent); new_upperdir = ovl_dentry_upper(new->d_parent); trap = lock_rename(new_upperdir, old_upperdir); olddentry = ovl_dentry_upper(old); newdentry = ovl_dentry_upper(new); if (newdentry) { if (opaquedir) { newdentry = opaquedir; opaquedir = NULL; } else { dget(newdentry); } } else { new_create = true; newdentry = lookup_one_len(new->d_name.name, new_upperdir, new->d_name.len); err = PTR_ERR(newdentry); if (IS_ERR(newdentry)) goto out_unlock; } err = 
-ESTALE; if (olddentry->d_parent != old_upperdir) goto out_dput; if (newdentry->d_parent != new_upperdir) goto out_dput; if (olddentry == trap) goto out_dput; if (newdentry == trap) goto out_dput; if (is_dir && !old_opaque && new_opaque) { err = ovl_set_opaque(olddentry); if (err) goto out_dput; } if (!overwrite && new_is_dir && old_opaque && !new_opaque) { err = ovl_set_opaque(newdentry); if (err) goto out_dput; } if (old_opaque || new_opaque) { err = ovl_do_rename(old_upperdir->d_inode, olddentry, new_upperdir->d_inode, newdentry, flags); } else { /* No debug for the plain case */ BUG_ON(flags & ~RENAME_EXCHANGE); err = vfs_rename(old_upperdir->d_inode, olddentry, new_upperdir->d_inode, newdentry, NULL, flags); } if (err) { if (is_dir && !old_opaque && new_opaque) ovl_remove_opaque(olddentry); if (!overwrite && new_is_dir && old_opaque && !new_opaque) ovl_remove_opaque(newdentry); goto out_dput; } if (is_dir && old_opaque && !new_opaque) ovl_remove_opaque(olddentry); if (!overwrite && new_is_dir && !old_opaque && new_opaque) ovl_remove_opaque(newdentry); /* * Old dentry now lives in different location. Dentries in * lowerstack are stale. We cannot drop them here because * access to them is lockless. This could be only pure upper * or opaque directory - numlower is zero. Or upper non-dir * entry - its pureness is tracked by flag opaque. */ if (old_opaque != new_opaque) { ovl_dentry_set_opaque(old, new_opaque); if (!overwrite) ovl_dentry_set_opaque(new, old_opaque); } if (cleanup_whiteout) ovl_cleanup(old_upperdir->d_inode, newdentry); ovl_dentry_version_inc(old->d_parent); ovl_dentry_version_inc(new->d_parent); out_dput: dput(newdentry); out_unlock: unlock_rename(new_upperdir, old_upperdir); out_revert_creds: if (old_opaque || new_opaque) { revert_creds(old_cred); put_cred(override_cred); } out_drop_write: ovl_drop_write(old); out: dput(opaquedir); return err; } Commit Message: ovl: verify upper dentry before unlink and rename Unlink and rename in overlayfs checked the upper dentry for staleness by verifying upper->d_parent against upperdir. However the dentry can go stale also by being unhashed, for example. Expand the verification to actually look up the name again (under parent lock) and check if it matches the upper dentry. This matches what the VFS does before passing the dentry to filesytem's unlink/rename methods, which excludes any inconsistency caused by overlayfs. Signed-off-by: Miklos Szeredi <[email protected]> CWE ID: CWE-20
static int ovl_rename2(struct inode *olddir, struct dentry *old, struct inode *newdir, struct dentry *new, unsigned int flags) { int err; enum ovl_path_type old_type; enum ovl_path_type new_type; struct dentry *old_upperdir; struct dentry *new_upperdir; struct dentry *olddentry; struct dentry *newdentry; struct dentry *trap; bool old_opaque; bool new_opaque; bool new_create = false; bool cleanup_whiteout = false; bool overwrite = !(flags & RENAME_EXCHANGE); bool is_dir = d_is_dir(old); bool new_is_dir = false; struct dentry *opaquedir = NULL; const struct cred *old_cred = NULL; struct cred *override_cred = NULL; err = -EINVAL; if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE)) goto out; flags &= ~RENAME_NOREPLACE; err = ovl_check_sticky(old); if (err) goto out; /* Don't copy up directory trees */ old_type = ovl_path_type(old); err = -EXDEV; if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir) goto out; if (new->d_inode) { err = ovl_check_sticky(new); if (err) goto out; if (d_is_dir(new)) new_is_dir = true; new_type = ovl_path_type(new); err = -EXDEV; if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) goto out; err = 0; if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) { if (ovl_dentry_lower(old)->d_inode == ovl_dentry_lower(new)->d_inode) goto out; } if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) { if (ovl_dentry_upper(old)->d_inode == ovl_dentry_upper(new)->d_inode) goto out; } } else { if (ovl_dentry_is_opaque(new)) new_type = __OVL_PATH_UPPER; else new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE; } err = ovl_want_write(old); if (err) goto out; err = ovl_copy_up(old); if (err) goto out_drop_write; err = ovl_copy_up(new->d_parent); if (err) goto out_drop_write; if (!overwrite) { err = ovl_copy_up(new); if (err) goto out_drop_write; } old_opaque = !OVL_TYPE_PURE_UPPER(old_type); new_opaque = !OVL_TYPE_PURE_UPPER(new_type); if (old_opaque || new_opaque) { err = -ENOMEM; override_cred = prepare_creds(); if (!override_cred) goto out_drop_write; /* * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir * CAP_DAC_OVERRIDE for create in workdir * CAP_FOWNER for removing whiteout from sticky dir * CAP_FSETID for chmod of opaque dir * CAP_CHOWN for chown of opaque dir */ cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN); cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); cap_raise(override_cred->cap_effective, CAP_FOWNER); cap_raise(override_cred->cap_effective, CAP_FSETID); cap_raise(override_cred->cap_effective, CAP_CHOWN); old_cred = override_creds(override_cred); } if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) { opaquedir = ovl_check_empty_and_clear(new); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) { opaquedir = NULL; goto out_revert_creds; } } if (overwrite) { if (old_opaque) { if (new->d_inode || !new_opaque) { /* Whiteout source */ flags |= RENAME_WHITEOUT; } else { /* Switch whiteouts */ flags |= RENAME_EXCHANGE; } } else if (is_dir && !new->d_inode && new_opaque) { flags |= RENAME_EXCHANGE; cleanup_whiteout = true; } } old_upperdir = ovl_dentry_upper(old->d_parent); new_upperdir = ovl_dentry_upper(new->d_parent); trap = lock_rename(new_upperdir, old_upperdir); olddentry = lookup_one_len(old->d_name.name, old_upperdir, old->d_name.len); err = PTR_ERR(olddentry); if (IS_ERR(olddentry)) goto out_unlock; err = -ESTALE; if (olddentry != ovl_dentry_upper(old)) goto out_dput_old; newdentry = lookup_one_len(new->d_name.name, new_upperdir, new->d_name.len); err = PTR_ERR(newdentry); if (IS_ERR(newdentry)) goto out_dput_old; 
err = -ESTALE; if (ovl_dentry_upper(new)) { if (opaquedir) { if (newdentry != opaquedir) goto out_dput; } else { if (newdentry != ovl_dentry_upper(new)) goto out_dput; } } else { new_create = true; if (!d_is_negative(newdentry) && (!new_opaque || !ovl_is_whiteout(newdentry))) goto out_dput; } if (olddentry == trap) goto out_dput; if (newdentry == trap) goto out_dput; if (is_dir && !old_opaque && new_opaque) { err = ovl_set_opaque(olddentry); if (err) goto out_dput; } if (!overwrite && new_is_dir && old_opaque && !new_opaque) { err = ovl_set_opaque(newdentry); if (err) goto out_dput; } if (old_opaque || new_opaque) { err = ovl_do_rename(old_upperdir->d_inode, olddentry, new_upperdir->d_inode, newdentry, flags); } else { /* No debug for the plain case */ BUG_ON(flags & ~RENAME_EXCHANGE); err = vfs_rename(old_upperdir->d_inode, olddentry, new_upperdir->d_inode, newdentry, NULL, flags); } if (err) { if (is_dir && !old_opaque && new_opaque) ovl_remove_opaque(olddentry); if (!overwrite && new_is_dir && old_opaque && !new_opaque) ovl_remove_opaque(newdentry); goto out_dput; } if (is_dir && old_opaque && !new_opaque) ovl_remove_opaque(olddentry); if (!overwrite && new_is_dir && !old_opaque && new_opaque) ovl_remove_opaque(newdentry); /* * Old dentry now lives in different location. Dentries in * lowerstack are stale. We cannot drop them here because * access to them is lockless. This could be only pure upper * or opaque directory - numlower is zero. Or upper non-dir * entry - its pureness is tracked by flag opaque. */ if (old_opaque != new_opaque) { ovl_dentry_set_opaque(old, new_opaque); if (!overwrite) ovl_dentry_set_opaque(new, old_opaque); } if (cleanup_whiteout) ovl_cleanup(old_upperdir->d_inode, newdentry); ovl_dentry_version_inc(old->d_parent); ovl_dentry_version_inc(new->d_parent); out_dput: dput(newdentry); out_dput_old: dput(olddentry); out_unlock: unlock_rename(new_upperdir, old_upperdir); out_revert_creds: if (old_opaque || new_opaque) { revert_creds(old_cred); put_cred(override_cred); } out_drop_write: ovl_drop_write(old); out: dput(opaquedir); return err; }
167,015
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: const Track* Tracks::GetTrackByIndex(unsigned long idx) const { const ptrdiff_t count = m_trackEntriesEnd - m_trackEntries; if (idx >= static_cast<unsigned long>(count)) return NULL; return m_trackEntries[idx]; } Commit Message: libwebm: Pull from upstream Rolling mkvparser from upstream. Primarily for fixing a bug on parsing failures with certain Opus WebM files. Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae The diff is so huge because there were some style clean ups upstream. But it was ensured that there were no breaking changes when the style clean ups was done upstream. Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 CWE ID: CWE-119
const Track* Tracks::GetTrackByIndex(unsigned long idx) const { const ptrdiff_t count = m_trackEntriesEnd - m_trackEntries; if (idx >= static_cast<unsigned long>(count)) return NULL; return m_trackEntries[idx]; }
174,370
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void v9fs_walk(void *opaque) { int name_idx; V9fsFidState *newfidp = NULL; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames); if (err < 0) { pdu_complete(pdu, err); return ; } V9fsFidState *newfidp = NULL; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames); if (err < 0) { for (i = 0; i < nwnames; i++) { err = pdu_unmarshal(pdu, offset, "s", &wnames[i]); if (err < 0) { goto out_nofid; } if (name_is_illegal(wnames[i].data)) { err = -ENOENT; goto out_nofid; } offset += err; } } else if (nwnames > P9_MAXWELEM) { err = -EINVAL; goto out_nofid; } fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } v9fs_path_init(&dpath); v9fs_path_init(&path); /* * Both dpath and path initially poin to fidp. * Needed to handle request with nwnames == 0 */ v9fs_path_copy(&dpath, &fidp->path); err = -ENOENT; goto out_nofid; } Commit Message: CWE ID: CWE-22
static void v9fs_walk(void *opaque) { int name_idx; V9fsFidState *newfidp = NULL; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames); if (err < 0) { pdu_complete(pdu, err); return ; } V9fsFidState *newfidp = NULL; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; V9fsQID qid; err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames); if (err < 0) { for (i = 0; i < nwnames; i++) { err = pdu_unmarshal(pdu, offset, "s", &wnames[i]); if (err < 0) { goto out_nofid; } if (name_is_illegal(wnames[i].data)) { err = -ENOENT; goto out_nofid; } offset += err; } } else if (nwnames > P9_MAXWELEM) { err = -EINVAL; goto out_nofid; } fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } v9fs_path_init(&dpath); v9fs_path_init(&path); /* * Both dpath and path initially poin to fidp. * Needed to handle request with nwnames == 0 */ v9fs_path_copy(&dpath, &fidp->path); err = -ENOENT; goto out_nofid; }
164,939
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { xstep = picomp->hsamp * (1 << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (1 << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (1 << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (1 << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; } Commit Message: Fixed numerous integer overflow problems in the code for packet iterators in the JPC decoder. CWE ID: CWE-125
static int jpc_pi_nextpcrl(register jpc_pi_t *pi) { int rlvlno; jpc_pirlvl_t *pirlvl; jpc_pchg_t *pchg; int prchind; int prcvind; int *prclyrno; int compno; jpc_picomp_t *picomp; int xstep; int ystep; uint_fast32_t trx0; uint_fast32_t try0; uint_fast32_t r; uint_fast32_t rpx; uint_fast32_t rpy; pchg = pi->pchg; if (!pi->prgvolfirst) { goto skip; } else { pi->xstep = 0; pi->ystep = 0; for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { // Check for the potential for overflow problems. if (pirlvl->prcwidthexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2 || pirlvl->prcheightexpn + pi->picomp->numrlvls > JAS_UINTFAST32_NUMBITS - 2) { return -1; } xstep = picomp->hsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcwidthexpn + picomp->numrlvls - rlvlno - 1)); ystep = picomp->vsamp * (JAS_CAST(uint_fast32_t, 1) << (pirlvl->prcheightexpn + picomp->numrlvls - rlvlno - 1)); pi->xstep = (!pi->xstep) ? xstep : JAS_MIN(pi->xstep, xstep); pi->ystep = (!pi->ystep) ? ystep : JAS_MIN(pi->ystep, ystep); } } pi->prgvolfirst = 0; } for (pi->y = pi->ystart; pi->y < pi->yend; pi->y += pi->ystep - (pi->y % pi->ystep)) { for (pi->x = pi->xstart; pi->x < pi->xend; pi->x += pi->xstep - (pi->x % pi->xstep)) { for (pi->compno = pchg->compnostart, pi->picomp = &pi->picomps[pi->compno]; pi->compno < pi->numcomps && pi->compno < JAS_CAST(int, pchg->compnoend); ++pi->compno, ++pi->picomp) { for (pi->rlvlno = pchg->rlvlnostart, pi->pirlvl = &pi->picomp->pirlvls[pi->rlvlno]; pi->rlvlno < pi->picomp->numrlvls && pi->rlvlno < pchg->rlvlnoend; ++pi->rlvlno, ++pi->pirlvl) { if (pi->pirlvl->numprcs == 0) { continue; } r = pi->picomp->numrlvls - 1 - pi->rlvlno; trx0 = JPC_CEILDIV(pi->xstart, pi->picomp->hsamp << r); try0 = JPC_CEILDIV(pi->ystart, pi->picomp->vsamp << r); rpx = r + pi->pirlvl->prcwidthexpn; rpy = r + pi->pirlvl->prcheightexpn; if (((pi->x == pi->xstart && ((trx0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpx))) || !(pi->x % (pi->picomp->hsamp << rpx))) && ((pi->y == pi->ystart && ((try0 << r) % (JAS_CAST(uint_fast32_t, 1) << rpy))) || !(pi->y % (pi->picomp->vsamp << rpy)))) { prchind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->x, pi->picomp->hsamp << r), pi->pirlvl->prcwidthexpn) - JPC_FLOORDIVPOW2(trx0, pi->pirlvl->prcwidthexpn); prcvind = JPC_FLOORDIVPOW2(JPC_CEILDIV(pi->y, pi->picomp->vsamp << r), pi->pirlvl->prcheightexpn) - JPC_FLOORDIVPOW2(try0, pi->pirlvl->prcheightexpn); pi->prcno = prcvind * pi->pirlvl->numhprcs + prchind; assert(pi->prcno < pi->pirlvl->numprcs); for (pi->lyrno = 0; pi->lyrno < pi->numlyrs && pi->lyrno < JAS_CAST(int, pchg->lyrnoend); ++pi->lyrno) { prclyrno = &pi->pirlvl->prclyrnos[pi->prcno]; if (pi->lyrno >= *prclyrno) { ++(*prclyrno); return 0; } skip: ; } } } } } } return 1; }
169,440
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: zend_op_array *compile_string(zval *source_string, char *filename TSRMLS_DC) { zend_lex_state original_lex_state; zend_op_array *op_array = (zend_op_array *) emalloc(sizeof(zend_op_array)); zend_op_array *original_active_op_array = CG(active_op_array); zend_op_array *retval; zval tmp; int compiler_result; zend_bool original_in_compilation = CG(in_compilation); if (source_string->value.str.len==0) { efree(op_array); return NULL; } CG(in_compilation) = 1; tmp = *source_string; zval_copy_ctor(&tmp); convert_to_string(&tmp); source_string = &tmp; zend_save_lexical_state(&original_lex_state TSRMLS_CC); if (zend_prepare_string_for_scanning(source_string, filename TSRMLS_CC)==FAILURE) { efree(op_array); retval = NULL; } else { zend_bool orig_interactive = CG(interactive); CG(interactive) = 0; init_op_array(op_array, ZEND_EVAL_CODE, INITIAL_OP_ARRAY_SIZE TSRMLS_CC); CG(interactive) = orig_interactive; CG(active_op_array) = op_array; zend_stack_push(&CG(context_stack), (void *) &CG(context), sizeof(CG(context))); zend_init_compiler_context(TSRMLS_C); BEGIN(ST_IN_SCRIPTING); compiler_result = zendparse(TSRMLS_C); if (SCNG(script_filtered)) { efree(SCNG(script_filtered)); SCNG(script_filtered) = NULL; } if (compiler_result==1) { CG(active_op_array) = original_active_op_array; CG(unclean_shutdown)=1; destroy_op_array(op_array TSRMLS_CC); efree(op_array); retval = NULL; } else { zend_do_return(NULL, 0 TSRMLS_CC); CG(active_op_array) = original_active_op_array; pass_two(op_array TSRMLS_CC); zend_release_labels(0 TSRMLS_CC); retval = op_array; } } zend_restore_lexical_state(&original_lex_state TSRMLS_CC); zval_dtor(&tmp); CG(in_compilation) = original_in_compilation; return retval; } Commit Message: fix bug #64660 - yyparse can return 2, not only 1 CWE ID: CWE-20
zend_op_array *compile_string(zval *source_string, char *filename TSRMLS_DC) { zend_lex_state original_lex_state; zend_op_array *op_array = (zend_op_array *) emalloc(sizeof(zend_op_array)); zend_op_array *original_active_op_array = CG(active_op_array); zend_op_array *retval; zval tmp; int compiler_result; zend_bool original_in_compilation = CG(in_compilation); if (source_string->value.str.len==0) { efree(op_array); return NULL; } CG(in_compilation) = 1; tmp = *source_string; zval_copy_ctor(&tmp); convert_to_string(&tmp); source_string = &tmp; zend_save_lexical_state(&original_lex_state TSRMLS_CC); if (zend_prepare_string_for_scanning(source_string, filename TSRMLS_CC)==FAILURE) { efree(op_array); retval = NULL; } else { zend_bool orig_interactive = CG(interactive); CG(interactive) = 0; init_op_array(op_array, ZEND_EVAL_CODE, INITIAL_OP_ARRAY_SIZE TSRMLS_CC); CG(interactive) = orig_interactive; CG(active_op_array) = op_array; zend_stack_push(&CG(context_stack), (void *) &CG(context), sizeof(CG(context))); zend_init_compiler_context(TSRMLS_C); BEGIN(ST_IN_SCRIPTING); compiler_result = zendparse(TSRMLS_C); if (SCNG(script_filtered)) { efree(SCNG(script_filtered)); SCNG(script_filtered) = NULL; } if (compiler_result != 0) { CG(active_op_array) = original_active_op_array; CG(unclean_shutdown)=1; destroy_op_array(op_array TSRMLS_CC); efree(op_array); retval = NULL; } else { zend_do_return(NULL, 0 TSRMLS_CC); CG(active_op_array) = original_active_op_array; pass_two(op_array TSRMLS_CC); zend_release_labels(0 TSRMLS_CC); retval = op_array; } } zend_restore_lexical_state(&original_lex_state TSRMLS_CC); zval_dtor(&tmp); CG(in_compilation) = original_in_compilation; return retval; }
166,024
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void AutofillPopupBaseView::DoShow() { const bool initialize_widget = !GetWidget(); if (initialize_widget) { if (parent_widget_) parent_widget_->AddObserver(this); views::Widget* widget = new views::Widget; views::Widget::InitParams params(views::Widget::InitParams::TYPE_POPUP); params.delegate = this; params.parent = parent_widget_ ? parent_widget_->GetNativeView() : delegate_->container_view(); AddExtraInitParams(&params); widget->Init(params); std::unique_ptr<views::View> wrapper = CreateWrapperView(); if (wrapper) widget->SetContentsView(wrapper.release()); widget->AddObserver(this); widget->SetVisibilityAnimationTransition(views::Widget::ANIMATE_HIDE); show_time_ = base::Time::Now(); } GetWidget()->GetRootView()->SetBorder(CreateBorder()); DoUpdateBoundsAndRedrawPopup(); GetWidget()->Show(); if (initialize_widget) views::WidgetFocusManager::GetInstance()->AddFocusChangeListener(this); } Commit Message: [Autofill] Remove AutofillPopupViewViews and associated feature. Bug: 906135,831603 Change-Id: I3c982f8b3ffb4928c7c878e74e10113999106499 Reviewed-on: https://chromium-review.googlesource.com/c/1387124 Reviewed-by: Robert Kaplow <[email protected]> Reviewed-by: Vasilii Sukhanov <[email protected]> Reviewed-by: Fabio Tirelo <[email protected]> Reviewed-by: Tommy Martino <[email protected]> Commit-Queue: Mathieu Perreault <[email protected]> Cr-Commit-Position: refs/heads/master@{#621360} CWE ID: CWE-416
void AutofillPopupBaseView::DoShow() { const bool initialize_widget = !GetWidget(); if (initialize_widget) { if (parent_widget_) parent_widget_->AddObserver(this); views::Widget* widget = new views::Widget; views::Widget::InitParams params(views::Widget::InitParams::TYPE_POPUP); params.delegate = this; params.parent = parent_widget_ ? parent_widget_->GetNativeView() : delegate_->container_view(); // Ensure the bubble border is not painted on an opaque background. params.opacity = views::Widget::InitParams::TRANSLUCENT_WINDOW; params.shadow_type = views::Widget::InitParams::SHADOW_TYPE_NONE; widget->Init(params); widget->AddObserver(this); widget->SetVisibilityAnimationTransition(views::Widget::ANIMATE_HIDE); show_time_ = base::Time::Now(); } GetWidget()->GetRootView()->SetBorder(CreateBorder()); DoUpdateBoundsAndRedrawPopup(); GetWidget()->Show(); if (initialize_widget) views::WidgetFocusManager::GetInstance()->AddFocusChangeListener(this); }
172,096
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) { struct rdma_umap_priv *priv, *next_priv; lockdep_assert_held(&ufile->hw_destroy_rwsem); while (1) { struct mm_struct *mm = NULL; /* Get an arbitrary mm pointer that hasn't been cleaned yet */ mutex_lock(&ufile->umap_lock); while (!list_empty(&ufile->umaps)) { int ret; priv = list_first_entry(&ufile->umaps, struct rdma_umap_priv, list); mm = priv->vma->vm_mm; ret = mmget_not_zero(mm); if (!ret) { list_del_init(&priv->list); mm = NULL; continue; } break; } mutex_unlock(&ufile->umap_lock); if (!mm) return; /* * The umap_lock is nested under mmap_sem since it used within * the vma_ops callbacks, so we have to clean the list one mm * at a time to get the lock ordering right. Typically there * will only be one mm, so no big deal. */ down_write(&mm->mmap_sem); mutex_lock(&ufile->umap_lock); list_for_each_entry_safe (priv, next_priv, &ufile->umaps, list) { struct vm_area_struct *vma = priv->vma; if (vma->vm_mm != mm) continue; list_del_init(&priv->list); zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); } mutex_unlock(&ufile->umap_lock); up_write(&mm->mmap_sem); mmput(mm); } } Commit Message: coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping The core dumping code has always run without holding the mmap_sem for writing, despite that is the only way to ensure that the entire vma layout will not change from under it. Only using some signal serialization on the processes belonging to the mm is not nearly enough. This was pointed out earlier. For example in Hugh's post from Jul 2017: https://lkml.kernel.org/r/[email protected] "Not strictly relevant here, but a related note: I was very surprised to discover, only quite recently, how handle_mm_fault() may be called without down_read(mmap_sem) - when core dumping. That seems a misguided optimization to me, which would also be nice to correct" In particular because the growsdown and growsup can move the vm_start/vm_end the various loops the core dump does around the vma will not be consistent if page faults can happen concurrently. Pretty much all users calling mmget_not_zero()/get_task_mm() and then taking the mmap_sem had the potential to introduce unexpected side effects in the core dumping code. Adding mmap_sem for writing around the ->core_dump invocation is a viable long term fix, but it requires removing all copy user and page faults and to replace them with get_dump_page() for all binary formats which is not suitable as a short term fix. For the time being this solution manually covers the places that can confuse the core dump either by altering the vma layout or the vma flags while it runs. Once ->core_dump runs under mmap_sem for writing the function mmget_still_valid() can be dropped. Allowing mmap_sem protected sections to run in parallel with the coredump provides some minor parallelism advantage to the swapoff code (which seems to be safe enough by never mangling any vma field and can keep doing swapins in parallel to the core dumping) and to some other corner case. In order to facilitate the backporting I added "Fixes: 86039bd3b4e6" however the side effect of this same race condition in /proc/pid/mem should be reproducible since before 2.6.12-rc2 so I couldn't add any other "Fixes:" because there's no hash beyond the git genesis commit. 
Because find_extend_vma() is the only location outside of the process context that could modify the "mm" structures under mmap_sem for reading, by adding the mmget_still_valid() check to it, all other cases that take the mmap_sem for reading don't need the new check after mmget_not_zero()/get_task_mm(). The expand_stack() in page fault context also doesn't need the new check, because all tasks under core dumping are frozen. Link: http://lkml.kernel.org/r/[email protected] Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization") Signed-off-by: Andrea Arcangeli <[email protected]> Reported-by: Jann Horn <[email protected]> Suggested-by: Oleg Nesterov <[email protected]> Acked-by: Peter Xu <[email protected]> Reviewed-by: Mike Rapoport <[email protected]> Reviewed-by: Oleg Nesterov <[email protected]> Reviewed-by: Jann Horn <[email protected]> Acked-by: Jason Gunthorpe <[email protected]> Acked-by: Michal Hocko <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-362
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) { struct rdma_umap_priv *priv, *next_priv; lockdep_assert_held(&ufile->hw_destroy_rwsem); while (1) { struct mm_struct *mm = NULL; /* Get an arbitrary mm pointer that hasn't been cleaned yet */ mutex_lock(&ufile->umap_lock); while (!list_empty(&ufile->umaps)) { int ret; priv = list_first_entry(&ufile->umaps, struct rdma_umap_priv, list); mm = priv->vma->vm_mm; ret = mmget_not_zero(mm); if (!ret) { list_del_init(&priv->list); mm = NULL; continue; } break; } mutex_unlock(&ufile->umap_lock); if (!mm) return; /* * The umap_lock is nested under mmap_sem since it used within * the vma_ops callbacks, so we have to clean the list one mm * at a time to get the lock ordering right. Typically there * will only be one mm, so no big deal. */ down_write(&mm->mmap_sem); if (!mmget_still_valid(mm)) goto skip_mm; mutex_lock(&ufile->umap_lock); list_for_each_entry_safe (priv, next_priv, &ufile->umaps, list) { struct vm_area_struct *vma = priv->vma; if (vma->vm_mm != mm) continue; list_del_init(&priv->list); zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); } mutex_unlock(&ufile->umap_lock); skip_mm: up_write(&mm->mmap_sem); mmput(mm); } }
169,684
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void AutoFillMetrics::Log(QualityMetric metric) const { DCHECK(metric < NUM_QUALITY_METRICS); UMA_HISTOGRAM_ENUMERATION("AutoFill.Quality", metric, NUM_QUALITY_METRICS); } Commit Message: Add support for autofill server experiments BUG=none TEST=unit_tests --gtest_filter=AutoFillMetricsTest.QualityMetricsWithExperimentId:AutoFillQueryXmlParserTest.ParseExperimentId Review URL: http://codereview.chromium.org/6260027 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@73216 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
void AutoFillMetrics::Log(QualityMetric metric, const std::string& experiment_id) const { DCHECK(metric < NUM_QUALITY_METRICS); std::string histogram_name = "AutoFill.Quality"; if (!experiment_id.empty()) histogram_name += "_" + experiment_id; UMA_HISTOGRAM_ENUMERATION(histogram_name, metric, NUM_QUALITY_METRICS); }
170,652
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: process_plane(uint8 * in, int width, int height, uint8 * out, int size) { UNUSED(size); int indexw; int indexh; int code; int collen; int replen; int color; int x; int revcode; uint8 * last_line; uint8 * this_line; uint8 * org_in; uint8 * org_out; org_in = in; org_out = out; last_line = 0; indexh = 0; while (indexh < height) { out = (org_out + width * height * 4) - ((indexh + 1) * width * 4); color = 0; this_line = out; indexw = 0; if (last_line == 0) { while (indexw < width) { code = CVAL(in); replen = code & 0xf; collen = (code >> 4) & 0xf; revcode = (replen << 4) | collen; if ((revcode <= 47) && (revcode >= 16)) { replen = revcode; collen = 0; } while (collen > 0) { color = CVAL(in); *out = color; out += 4; indexw++; collen--; } while (replen > 0) { *out = color; out += 4; indexw++; replen--; } } } else { while (indexw < width) { code = CVAL(in); replen = code & 0xf; collen = (code >> 4) & 0xf; revcode = (replen << 4) | collen; if ((revcode <= 47) && (revcode >= 16)) { replen = revcode; collen = 0; } while (collen > 0) { x = CVAL(in); if (x & 1) { x = x >> 1; x = x + 1; color = -x; } else { x = x >> 1; color = x; } x = last_line[indexw * 4] + color; *out = x; out += 4; indexw++; collen--; } while (replen > 0) { x = last_line[indexw * 4] + color; *out = x; out += 4; indexw++; replen--; } } } indexh++; last_line = this_line; } return (int) (in - org_in); } Commit Message: Malicious RDP server security fixes This commit includes fixes for a set of 21 vulnerabilities in rdesktop when a malicious RDP server is used. All vulnerabilities was identified and reported by Eyal Itkin. * Add rdp_protocol_error function that is used in several fixes * Refactor of process_bitmap_updates * Fix possible integer overflow in s_check_rem() on 32bit arch * Fix memory corruption in process_bitmap_data - CVE-2018-8794 * Fix remote code execution in process_bitmap_data - CVE-2018-8795 * Fix remote code execution in process_plane - CVE-2018-8797 * Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175 * Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175 * Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176 * Fix Denial of Service in sec_recv - CVE-2018-20176 * Fix minor information leak in rdpdr_process - CVE-2018-8791 * Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792 * Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793 * Fix Denial of Service in process_bitmap_data - CVE-2018-8796 * Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798 * Fix Denial of Service in process_secondary_order - CVE-2018-8799 * Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800 * Fix major information leak in ui_clip_handle_data - CVE-2018-20174 * Fix memory corruption in rdp_in_unistr - CVE-2018-20177 * Fix Denial of Service in process_demand_active - CVE-2018-20178 * Fix remote code execution in lspci_process - CVE-2018-20179 * Fix remote code execution in rdpsnddbg_process - CVE-2018-20180 * Fix remote code execution in seamless_process - CVE-2018-20181 * Fix remote code execution in seamless_process_line - CVE-2018-20182 CWE ID: CWE-119
process_plane(uint8 * in, int width, int height, uint8 * out, int size) { UNUSED(size); int indexw; int indexh; int code; int collen; int replen; int color; int x; int revcode; uint8 * last_line; uint8 * this_line; uint8 * org_in; uint8 * org_out; org_in = in; org_out = out; last_line = 0; indexh = 0; while (indexh < height) { out = (org_out + width * height * 4) - ((indexh + 1) * width * 4); color = 0; this_line = out; indexw = 0; if (last_line == 0) { while (indexw < width) { code = CVAL(in); replen = code & 0xf; collen = (code >> 4) & 0xf; revcode = (replen << 4) | collen; if ((revcode <= 47) && (revcode >= 16)) { replen = revcode; collen = 0; } while (indexw < width && collen > 0) { color = CVAL(in); *out = color; out += 4; indexw++; collen--; } while (indexw < width && replen > 0) { *out = color; out += 4; indexw++; replen--; } } } else { while (indexw < width) { code = CVAL(in); replen = code & 0xf; collen = (code >> 4) & 0xf; revcode = (replen << 4) | collen; if ((revcode <= 47) && (revcode >= 16)) { replen = revcode; collen = 0; } while (indexw < width && collen > 0) { x = CVAL(in); if (x & 1) { x = x >> 1; x = x + 1; color = -x; } else { x = x >> 1; color = x; } x = last_line[indexw * 4] + color; *out = x; out += 4; indexw++; collen--; } while (indexw < width && replen > 0) { x = last_line[indexw * 4] + color; *out = x; out += 4; indexw++; replen--; } } } indexh++; last_line = this_line; } return (int) (in - org_in); }
169,795
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ping_unhash(struct sock *sk) { struct inet_sock *isk = inet_sk(sk); pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); if (sk_hashed(sk)) { write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); sock_put(sk); isk->inet_num = 0; isk->inet_sport = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); write_unlock_bh(&ping_table.lock); } } Commit Message: ipv4: Missing sk_nulls_node_init() in ping_unhash(). If we don't do that, then the poison value is left in the ->pprev backlink. This can cause crashes if we do a disconnect, followed by a connect(). Tested-by: Linus Torvalds <[email protected]> Reported-by: Wen Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID:
void ping_unhash(struct sock *sk) { struct inet_sock *isk = inet_sk(sk); pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); if (sk_hashed(sk)) { write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); sk_nulls_node_init(&sk->sk_nulls_node); sock_put(sk); isk->inet_num = 0; isk->inet_sport = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); write_unlock_bh(&ping_table.lock); } }
166,623
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; uint16 *wp = (uint16*) cp0; tmsize_t wc = cc/2; assert((cc%(2*stride))==0); if (wc > stride) { wc -= stride; wp += wc - 1; do { REPEAT4(stride, wp[stride] = (uint16)(((unsigned int)wp[stride] - (unsigned int)wp[0]) & 0xffff); wp--) wc -= stride; } while (wc > 0); } } Commit Message: * libtiff/tif_predict.h, libtiff/tif_predict.c: Replace assertions by runtime checks to avoid assertions in debug mode, or buffer overflows in release mode. Can happen when dealing with unusual tile size like YCbCr with subsampling. Reported as MSVR 35105 by Axel Souchet & Vishal Chauhan from the MSRC Vulnerabilities & Mitigations team. CWE ID: CWE-119
horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; uint16 *wp = (uint16*) cp0; tmsize_t wc = cc/2; if((cc%(2*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "horDiff8", "%s", "(cc%(2*stride))!=0"); return 0; } if (wc > stride) { wc -= stride; wp += wc - 1; do { REPEAT4(stride, wp[stride] = (uint16)(((unsigned int)wp[stride] - (unsigned int)wp[0]) & 0xffff); wp--) wc -= stride; } while (wc > 0); } return 1; }
166,885
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: BGD_DECLARE(void) gdImageWebpEx (gdImagePtr im, FILE * outFile, int quality) { gdIOCtx *out = gdNewFileCtx(outFile); if (out == NULL) { return; } gdImageWebpCtx(im, out, quality); out->gd_free(out); } Commit Message: Fix double-free in gdImageWebPtr() The issue is that gdImageWebpCtx() (which is called by gdImageWebpPtr() and the other WebP output functions to do the real work) does not return whether it succeeded or failed, so this is not checked in gdImageWebpPtr() and the function wrongly assumes everything is okay, which is not, in this case, because there is a size limitation for WebP, namely that the width and height must by less than 16383. We can't change the signature of gdImageWebpCtx() for API compatibility reasons, so we introduce the static helper _gdImageWebpCtx() which returns success respective failure, so gdImageWebpPtr() and gdImageWebpPtrEx() can check the return value. We leave it solely to libwebp for now to report warnings regarding the failing write. This issue had been reported by Ibrahim El-Sayed to [email protected]. CVE-2016-6912 CWE ID: CWE-415
BGD_DECLARE(void) gdImageWebpEx (gdImagePtr im, FILE * outFile, int quality) { gdIOCtx *out = gdNewFileCtx(outFile); if (out == NULL) { return; } _gdImageWebpCtx(im, out, quality); out->gd_free(out); }
168,818
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: gs_call_interp(i_ctx_t **pi_ctx_p, ref * pref, int user_errors, int *pexit_code, ref * perror_object) { ref *epref = pref; ref doref; ref *perrordict; ref error_name; int code, ccode; ref saref; i_ctx_t *i_ctx_p = *pi_ctx_p; int *gc_signal = &imemory_system->gs_lib_ctx->gcsignal; *pexit_code = 0; *gc_signal = 0; ialloc_reset_requested(idmemory); again: /* Avoid a dangling error object that might get traced by a future GC. */ make_null(perror_object); o_stack.requested = e_stack.requested = d_stack.requested = 0; while (*gc_signal) { /* Some routine below triggered a GC. */ gs_gc_root_t epref_root; *gc_signal = 0; /* Make sure that doref will get relocated properly if */ /* a garbage collection happens with epref == &doref. */ gs_register_ref_root(imemory_system, &epref_root, (void **)&epref, "gs_call_interp(epref)"); code = interp_reclaim(pi_ctx_p, -1); i_ctx_p = *pi_ctx_p; gs_unregister_root(imemory_system, &epref_root, "gs_call_interp(epref)"); if (code < 0) return code; } code = interp(pi_ctx_p, epref, perror_object); i_ctx_p = *pi_ctx_p; if (!r_has_type(&i_ctx_p->error_object, t__invalid)) { *perror_object = i_ctx_p->error_object; make_t(&i_ctx_p->error_object, t__invalid); } /* Prevent a dangling reference to the GC signal in ticks_left */ /* in the frame of interp, but be prepared to do a GC if */ /* an allocation in this routine asks for it. */ *gc_signal = 0; set_gc_signal(i_ctx_p, 1); if (esp < esbot) /* popped guard entry */ esp = esbot; switch (code) { case gs_error_Fatal: *pexit_code = 255; return code; case gs_error_Quit: *perror_object = osp[-1]; *pexit_code = code = osp->value.intval; osp -= 2; return (code == 0 ? gs_error_Quit : code < 0 && code > -100 ? code : gs_error_Fatal); case gs_error_InterpreterExit: return 0; case gs_error_ExecStackUnderflow: /****** WRONG -- must keep mark blocks intact ******/ ref_stack_pop_block(&e_stack); doref = *perror_object; epref = &doref; goto again; case gs_error_VMreclaim: /* Do the GC and continue. */ /* We ignore the return value here, if it fails here * we'll call it again having jumped to the "again" label. * Where, assuming it fails again, we'll handle the error. */ (void)interp_reclaim(pi_ctx_p, (osp->value.intval == 2 ? avm_global : avm_local)); i_ctx_p = *pi_ctx_p; make_oper(&doref, 0, zpop); epref = &doref; goto again; case gs_error_NeedInput: case gs_error_interrupt: return code; } /* Adjust osp in case of operand stack underflow */ if (osp < osbot - 1) osp = osbot - 1; /* We have to handle stack over/underflow specially, because */ /* we might be able to recover by adding or removing a block. */ switch (code) { case gs_error_dictstackoverflow: /* We don't have to handle this specially: */ /* The only places that could generate it */ /* use check_dstack, which does a ref_stack_extend, */ /* so if` we get this error, it's a real one. */ if (osp >= ostop) { if ((ccode = ref_stack_extend(&o_stack, 1)) < 0) return ccode; } /* Skip system dictionaries for CET 20-02-02 */ ccode = copy_stack(i_ctx_p, &d_stack, min_dstack_size, &saref); if (ccode < 0) return ccode; ref_stack_pop_to(&d_stack, min_dstack_size); dict_set_top(); *++osp = saref; break; case gs_error_dictstackunderflow: if (ref_stack_pop_block(&d_stack) >= 0) { dict_set_top(); doref = *perror_object; epref = &doref; goto again; } break; case gs_error_execstackoverflow: /* We don't have to handle this specially: */ /* The only places that could generate it */ /* use check_estack, which does a ref_stack_extend, */ /* so if we get this error, it's a real one. 
*/ if (osp >= ostop) { if ((ccode = ref_stack_extend(&o_stack, 1)) < 0) return ccode; } ccode = copy_stack(i_ctx_p, &e_stack, 0, &saref); if (ccode < 0) return ccode; { uint count = ref_stack_count(&e_stack); uint limit = ref_stack_max_count(&e_stack) - ES_HEADROOM; if (count > limit) { /* * If there is an e-stack mark within MIN_BLOCK_ESTACK of * the new top, cut the stack back to remove the mark. */ int skip = count - limit; int i; for (i = skip; i < skip + MIN_BLOCK_ESTACK; ++i) { const ref *ep = ref_stack_index(&e_stack, i); if (r_has_type_attrs(ep, t_null, a_executable)) { skip = i + 1; break; } } pop_estack(i_ctx_p, skip); } } *++osp = saref; break; case gs_error_stackoverflow: if (ref_stack_extend(&o_stack, o_stack.requested) >= 0) { /* We can't just re-execute the object, because */ /* it might be a procedure being pushed as a */ /* literal. We check for this case specially. */ doref = *perror_object; if (r_is_proc(&doref)) { *++osp = doref; make_null_proc(&doref); } epref = &doref; goto again; } ccode = copy_stack(i_ctx_p, &o_stack, 0, &saref); if (ccode < 0) return ccode; ref_stack_clear(&o_stack); *++osp = saref; break; case gs_error_stackunderflow: if (ref_stack_pop_block(&o_stack) >= 0) { doref = *perror_object; epref = &doref; goto again; } break; } if (user_errors < 0) return code; if (gs_errorname(i_ctx_p, code, &error_name) < 0) return code; /* out-of-range error code! */ /* * For greater Adobe compatibility, only the standard PostScript errors * are defined in errordict; the rest are in gserrordict. */ if (dict_find_string(systemdict, "errordict", &perrordict) <= 0 || (dict_find(perrordict, &error_name, &epref) <= 0 && (dict_find_string(systemdict, "gserrordict", &perrordict) <= 0 || dict_find(perrordict, &error_name, &epref) <= 0)) ) return code; /* error name not in errordict??? */ doref = *epref; epref = &doref; /* Push the error object on the operand stack if appropriate. */ if (!GS_ERROR_IS_INTERRUPT(code)) { /* Replace the error object if within an oparray or .errorexec. */ *++osp = *perror_object; errorexec_find(i_ctx_p, osp); } goto again; } Commit Message: CWE ID: CWE-388
gs_call_interp(i_ctx_t **pi_ctx_p, ref * pref, int user_errors, int *pexit_code, ref * perror_object) { ref *epref = pref; ref doref; ref *perrordict; ref error_name; int code, ccode; ref saref; i_ctx_t *i_ctx_p = *pi_ctx_p; int *gc_signal = &imemory_system->gs_lib_ctx->gcsignal; *pexit_code = 0; *gc_signal = 0; ialloc_reset_requested(idmemory); again: /* Avoid a dangling error object that might get traced by a future GC. */ make_null(perror_object); o_stack.requested = e_stack.requested = d_stack.requested = 0; while (*gc_signal) { /* Some routine below triggered a GC. */ gs_gc_root_t epref_root; *gc_signal = 0; /* Make sure that doref will get relocated properly if */ /* a garbage collection happens with epref == &doref. */ gs_register_ref_root(imemory_system, &epref_root, (void **)&epref, "gs_call_interp(epref)"); code = interp_reclaim(pi_ctx_p, -1); i_ctx_p = *pi_ctx_p; gs_unregister_root(imemory_system, &epref_root, "gs_call_interp(epref)"); if (code < 0) return code; } code = interp(pi_ctx_p, epref, perror_object); i_ctx_p = *pi_ctx_p; if (!r_has_type(&i_ctx_p->error_object, t__invalid)) { *perror_object = i_ctx_p->error_object; make_t(&i_ctx_p->error_object, t__invalid); } /* Prevent a dangling reference to the GC signal in ticks_left */ /* in the frame of interp, but be prepared to do a GC if */ /* an allocation in this routine asks for it. */ *gc_signal = 0; set_gc_signal(i_ctx_p, 1); if (esp < esbot) /* popped guard entry */ esp = esbot; switch (code) { case gs_error_Fatal: *pexit_code = 255; return code; case gs_error_Quit: *perror_object = osp[-1]; *pexit_code = code = osp->value.intval; osp -= 2; return (code == 0 ? gs_error_Quit : code < 0 && code > -100 ? code : gs_error_Fatal); case gs_error_InterpreterExit: return 0; case gs_error_ExecStackUnderflow: /****** WRONG -- must keep mark blocks intact ******/ ref_stack_pop_block(&e_stack); doref = *perror_object; epref = &doref; goto again; case gs_error_VMreclaim: /* Do the GC and continue. */ /* We ignore the return value here, if it fails here * we'll call it again having jumped to the "again" label. * Where, assuming it fails again, we'll handle the error. */ (void)interp_reclaim(pi_ctx_p, (osp->value.intval == 2 ? avm_global : avm_local)); i_ctx_p = *pi_ctx_p; make_oper(&doref, 0, zpop); epref = &doref; goto again; case gs_error_NeedInput: case gs_error_interrupt: return code; } /* Adjust osp in case of operand stack underflow */ if (osp < osbot - 1) osp = osbot - 1; /* We have to handle stack over/underflow specially, because */ /* we might be able to recover by adding or removing a block. */ switch (code) { case gs_error_dictstackoverflow: /* We don't have to handle this specially: */ /* The only places that could generate it */ /* use check_dstack, which does a ref_stack_extend, */ /* so if` we get this error, it's a real one. */ if (osp >= ostop) { if ((ccode = ref_stack_extend(&o_stack, 1)) < 0) return ccode; } /* Skip system dictionaries for CET 20-02-02 */ ccode = copy_stack(i_ctx_p, &d_stack, min_dstack_size, &saref); if (ccode < 0) return ccode; ref_stack_pop_to(&d_stack, min_dstack_size); dict_set_top(); *++osp = saref; break; case gs_error_dictstackunderflow: if (ref_stack_pop_block(&d_stack) >= 0) { dict_set_top(); doref = *perror_object; epref = &doref; goto again; } break; case gs_error_execstackoverflow: /* We don't have to handle this specially: */ /* The only places that could generate it */ /* use check_estack, which does a ref_stack_extend, */ /* so if we get this error, it's a real one. 
*/ if (osp >= ostop) { if ((ccode = ref_stack_extend(&o_stack, 1)) < 0) return ccode; } ccode = copy_stack(i_ctx_p, &e_stack, 0, &saref); if (ccode < 0) return ccode; { uint count = ref_stack_count(&e_stack); uint limit = ref_stack_max_count(&e_stack) - ES_HEADROOM; if (count > limit) { /* * If there is an e-stack mark within MIN_BLOCK_ESTACK of * the new top, cut the stack back to remove the mark. */ int skip = count - limit; int i; for (i = skip; i < skip + MIN_BLOCK_ESTACK; ++i) { const ref *ep = ref_stack_index(&e_stack, i); if (r_has_type_attrs(ep, t_null, a_executable)) { skip = i + 1; break; } } pop_estack(i_ctx_p, skip); } } *++osp = saref; break; case gs_error_stackoverflow: if (ref_stack_extend(&o_stack, o_stack.requested) >= 0) { /* We can't just re-execute the object, because */ /* it might be a procedure being pushed as a */ /* literal. We check for this case specially. */ doref = *perror_object; if (r_is_proc(&doref)) { *++osp = doref; make_null_proc(&doref); } epref = &doref; goto again; } ccode = copy_stack(i_ctx_p, &o_stack, 0, &saref); if (ccode < 0) return ccode; ref_stack_clear(&o_stack); *++osp = saref; break; case gs_error_stackunderflow: if (ref_stack_pop_block(&o_stack) >= 0) { doref = *perror_object; epref = &doref; goto again; } break; } if (user_errors < 0) return code; if (gs_errorname(i_ctx_p, code, &error_name) < 0) return code; /* out-of-range error code! */ /* * For greater Adobe compatibility, only the standard PostScript errors * are defined in errordict; the rest are in gserrordict. */ if (dict_find_string(systemdict, "errordict", &perrordict) <= 0 || (dict_find(perrordict, &error_name, &epref) <= 0 && (dict_find_string(systemdict, "gserrordict", &perrordict) <= 0 || dict_find(perrordict, &error_name, &epref) <= 0)) ) return code; /* error name not in errordict??? */ doref = *epref; epref = &doref; /* Push the error object on the operand stack if appropriate. */ if (!GS_ERROR_IS_INTERRUPT(code)) { /* Replace the error object if within an oparray or .errorexec. */ osp++; if (osp >= ostop) { *pexit_code = gs_error_Fatal; return_error(gs_error_Fatal); } *osp = *perror_object; errorexec_find(i_ctx_p, osp); } goto again; }
164,694
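The only behavioural change between the two gs_call_interp() versions above is the guard added before the final push of the error object: the fixed code advances osp, compares it against ostop, and returns gs_error_Fatal instead of writing past the end of the operand stack. A small standalone sketch of that guard pattern on a fixed-size stack (the names below are illustrative, not Ghostscript's):

    #include <stdio.h>

    #define STACK_SLOTS 4

    static int stack_slots[STACK_SLOTS];
    static int *top = stack_slots;                        /* next free slot */
    static int *const limit = stack_slots + STACK_SLOTS;  /* one past the last slot */

    /* Checked push, mirroring the shape of the fix: test for room first and
     * let the caller treat failure as fatal, rather than writing out of
     * bounds the way an unguarded "*++osp = value" does on a full stack. */
    static int push_checked(int v)
    {
        if (top >= limit)
            return -1;
        *top++ = v;
        return 0;
    }

    int main(void)
    {
        for (int i = 0; i < 8; i++) {
            if (push_checked(i) < 0) {
                printf("push %d refused: stack full\n", i);
                break;
            }
        }
        return 0;
    }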
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) { int name_index; void *value = NULL; size_t size = 0; int error; switch(type) { case ACL_TYPE_ACCESS: name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { error = posix_acl_equiv_mode(acl, &inode->i_mode); if (error < 0) return error; else { inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); if (error == 0) acl = NULL; } } break; case ACL_TYPE_DEFAULT: name_index = EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT; if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; break; default: return -EINVAL; } if (acl) { value = ext2_acl_to_disk(acl, &size); if (IS_ERR(value)) return (int)PTR_ERR(value); } error = ext2_xattr_set(inode, name_index, "", value, size, 0); kfree(value); if (!error) set_cached_acl(inode, type, acl); return error; } Commit Message: posix_acl: Clear SGID bit when setting file permissions When file permissions are modified via chmod(2) and the user is not in the owning group or capable of CAP_FSETID, the setgid bit is cleared in inode_change_ok(). Setting a POSIX ACL via setxattr(2) sets the file permissions as well as the new ACL, but doesn't clear the setgid bit in a similar way; this allows to bypass the check in chmod(2). Fix that. References: CVE-2016-7097 Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Jeff Layton <[email protected]> Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Andreas Gruenbacher <[email protected]> CWE ID: CWE-285
ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) { int name_index; void *value = NULL; size_t size = 0; int error; switch(type) { case ACL_TYPE_ACCESS: name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { error = posix_acl_update_mode(inode, &inode->i_mode, &acl); if (error) return error; inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); } break; case ACL_TYPE_DEFAULT: name_index = EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT; if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; break; default: return -EINVAL; } if (acl) { value = ext2_acl_to_disk(acl, &size); if (IS_ERR(value)) return (int)PTR_ERR(value); } error = ext2_xattr_set(inode, name_index, "", value, size, 0); kfree(value); if (!error) set_cached_acl(inode, type, acl); return error; }
166,969
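The fix replaces the open-coded posix_acl_equiv_mode() handling with posix_acl_update_mode(), the helper this commit series introduces: it recomputes the mode bits from the ACL and additionally clears the setgid bit when the caller is neither in the owning group nor holding CAP_FSETID, mirroring the chmod(2) check the commit message cites. A sketch of what such a helper has to do, reconstructed from that description (kernel-context code, not buildable on its own, and the exact upstream body may differ):

    int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
                              struct posix_acl **acl)
    {
            umode_t mode = inode->i_mode;
            int error;

            error = posix_acl_equiv_mode(*acl, &mode);
            if (error < 0)
                    return error;
            if (error == 0)
                    *acl = NULL;    /* ACL is fully expressible as mode bits */

            /* The part the old ext2_set_acl() was missing: drop SGID unless
             * the caller is in the owning group or privileged, as chmod(2)
             * would. */
            if (!in_group_p(inode->i_gid) &&
                !capable_wrt_inode_uidgid(inode, CAP_FSETID))
                    mode &= ~S_ISGID;

            *mode_p = mode;
            return 0;
    }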
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: v8::Handle<v8::Value> V8WebGLRenderingContext::getUniformCallback(const v8::Arguments& args) { INC_STATS("DOM.WebGLRenderingContext.getUniform()"); if (args.Length() != 2) return V8Proxy::throwNotEnoughArgumentsError(); ExceptionCode ec = 0; WebGLRenderingContext* context = V8WebGLRenderingContext::toNative(args.Holder()); if (args.Length() > 0 && !isUndefinedOrNull(args[0]) && !V8WebGLProgram::HasInstance(args[0])) { V8Proxy::throwTypeError(); return notHandledByInterceptor(); } WebGLProgram* program = V8WebGLProgram::HasInstance(args[0]) ? V8WebGLProgram::toNative(v8::Handle<v8::Object>::Cast(args[0])) : 0; if (args.Length() > 1 && !isUndefinedOrNull(args[1]) && !V8WebGLUniformLocation::HasInstance(args[1])) { V8Proxy::throwTypeError(); return notHandledByInterceptor(); } bool ok = false; WebGLUniformLocation* location = toWebGLUniformLocation(args[1], ok); WebGLGetInfo info = context->getUniform(program, location, ec); if (ec) { V8Proxy::setDOMException(ec, args.GetIsolate()); return v8::Undefined(); } return toV8Object(info, args.GetIsolate()); } Commit Message: [V8] Pass Isolate to throwNotEnoughArgumentsError() https://bugs.webkit.org/show_bug.cgi?id=86983 Reviewed by Adam Barth. The objective is to pass Isolate around in V8 bindings. This patch passes Isolate to throwNotEnoughArgumentsError(). No tests. No change in behavior. * bindings/scripts/CodeGeneratorV8.pm: (GenerateArgumentsCountCheck): (GenerateEventConstructorCallback): * bindings/scripts/test/V8/V8Float64Array.cpp: (WebCore::Float64ArrayV8Internal::fooCallback): * bindings/scripts/test/V8/V8TestActiveDOMObject.cpp: (WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback): (WebCore::TestActiveDOMObjectV8Internal::postMessageCallback): * bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp: (WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback): * bindings/scripts/test/V8/V8TestEventConstructor.cpp: (WebCore::V8TestEventConstructor::constructorCallback): * bindings/scripts/test/V8/V8TestEventTarget.cpp: (WebCore::TestEventTargetV8Internal::itemCallback): (WebCore::TestEventTargetV8Internal::dispatchEventCallback): * bindings/scripts/test/V8/V8TestInterface.cpp: (WebCore::TestInterfaceV8Internal::supplementalMethod2Callback): (WebCore::V8TestInterface::constructorCallback): * bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp: (WebCore::TestMediaQueryListListenerV8Internal::methodCallback): * bindings/scripts/test/V8/V8TestNamedConstructor.cpp: (WebCore::V8TestNamedConstructorConstructorCallback): * bindings/scripts/test/V8/V8TestObj.cpp: (WebCore::TestObjV8Internal::voidMethodWithArgsCallback): (WebCore::TestObjV8Internal::intMethodWithArgsCallback): (WebCore::TestObjV8Internal::objMethodWithArgsCallback): (WebCore::TestObjV8Internal::methodWithSequenceArgCallback): (WebCore::TestObjV8Internal::methodReturningSequenceCallback): (WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback): (WebCore::TestObjV8Internal::serializedValueCallback): (WebCore::TestObjV8Internal::idbKeyCallback): (WebCore::TestObjV8Internal::optionsObjectCallback): (WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback): (WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback): (WebCore::TestObjV8Internal::methodWithCallbackArgCallback): (WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback): (WebCore::TestObjV8Internal::overloadedMethod1Callback): (WebCore::TestObjV8Internal::overloadedMethod2Callback): 
(WebCore::TestObjV8Internal::overloadedMethod3Callback): (WebCore::TestObjV8Internal::overloadedMethod4Callback): (WebCore::TestObjV8Internal::overloadedMethod5Callback): (WebCore::TestObjV8Internal::overloadedMethod6Callback): (WebCore::TestObjV8Internal::overloadedMethod7Callback): (WebCore::TestObjV8Internal::overloadedMethod11Callback): (WebCore::TestObjV8Internal::overloadedMethod12Callback): (WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback): (WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback): (WebCore::TestObjV8Internal::convert1Callback): (WebCore::TestObjV8Internal::convert2Callback): (WebCore::TestObjV8Internal::convert3Callback): (WebCore::TestObjV8Internal::convert4Callback): (WebCore::TestObjV8Internal::convert5Callback): (WebCore::TestObjV8Internal::strictFunctionCallback): (WebCore::V8TestObj::constructorCallback): * bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp: (WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback): (WebCore::V8TestSerializedScriptValueInterface::constructorCallback): * bindings/v8/ScriptController.cpp: (WebCore::setValueAndClosePopupCallback): * bindings/v8/V8Proxy.cpp: (WebCore::V8Proxy::throwNotEnoughArgumentsError): * bindings/v8/V8Proxy.h: (V8Proxy): * bindings/v8/custom/V8AudioContextCustom.cpp: (WebCore::V8AudioContext::constructorCallback): * bindings/v8/custom/V8DataViewCustom.cpp: (WebCore::V8DataView::getInt8Callback): (WebCore::V8DataView::getUint8Callback): (WebCore::V8DataView::setInt8Callback): (WebCore::V8DataView::setUint8Callback): * bindings/v8/custom/V8DirectoryEntryCustom.cpp: (WebCore::V8DirectoryEntry::getDirectoryCallback): (WebCore::V8DirectoryEntry::getFileCallback): * bindings/v8/custom/V8IntentConstructor.cpp: (WebCore::V8Intent::constructorCallback): * bindings/v8/custom/V8SVGLengthCustom.cpp: (WebCore::V8SVGLength::convertToSpecifiedUnitsCallback): * bindings/v8/custom/V8WebGLRenderingContextCustom.cpp: (WebCore::getObjectParameter): (WebCore::V8WebGLRenderingContext::getAttachedShadersCallback): (WebCore::V8WebGLRenderingContext::getExtensionCallback): (WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback): (WebCore::V8WebGLRenderingContext::getParameterCallback): (WebCore::V8WebGLRenderingContext::getProgramParameterCallback): (WebCore::V8WebGLRenderingContext::getShaderParameterCallback): (WebCore::V8WebGLRenderingContext::getUniformCallback): (WebCore::vertexAttribAndUniformHelperf): (WebCore::uniformHelperi): (WebCore::uniformMatrixHelper): * bindings/v8/custom/V8WebKitMutationObserverCustom.cpp: (WebCore::V8WebKitMutationObserver::constructorCallback): (WebCore::V8WebKitMutationObserver::observeCallback): * bindings/v8/custom/V8WebSocketCustom.cpp: (WebCore::V8WebSocket::constructorCallback): (WebCore::V8WebSocket::sendCallback): * bindings/v8/custom/V8XMLHttpRequestCustom.cpp: (WebCore::V8XMLHttpRequest::openCallback): git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538 CWE ID:
v8::Handle<v8::Value> V8WebGLRenderingContext::getUniformCallback(const v8::Arguments& args) { INC_STATS("DOM.WebGLRenderingContext.getUniform()"); if (args.Length() != 2) return V8Proxy::throwNotEnoughArgumentsError(args.GetIsolate()); ExceptionCode ec = 0; WebGLRenderingContext* context = V8WebGLRenderingContext::toNative(args.Holder()); if (args.Length() > 0 && !isUndefinedOrNull(args[0]) && !V8WebGLProgram::HasInstance(args[0])) { V8Proxy::throwTypeError(); return notHandledByInterceptor(); } WebGLProgram* program = V8WebGLProgram::HasInstance(args[0]) ? V8WebGLProgram::toNative(v8::Handle<v8::Object>::Cast(args[0])) : 0; if (args.Length() > 1 && !isUndefinedOrNull(args[1]) && !V8WebGLUniformLocation::HasInstance(args[1])) { V8Proxy::throwTypeError(); return notHandledByInterceptor(); } bool ok = false; WebGLUniformLocation* location = toWebGLUniformLocation(args[1], ok); WebGLGetInfo info = context->getUniform(program, location, ec); if (ec) { V8Proxy::setDOMException(ec, args.GetIsolate()); return v8::Undefined(); } return toV8Object(info, args.GetIsolate()); }
171,127
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int http_read_header(URLContext *h, int *new_location) { HTTPContext *s = h->priv_data; char line[MAX_URL_SIZE]; int err = 0; s->chunksize = -1; for (;;) { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; av_log(h, AV_LOG_TRACE, "header='%s'\n", line); err = process_line(h, line, s->line_count, new_location); if (err < 0) return err; if (err == 0) break; s->line_count++; } if (s->seekable == -1 && s->is_mediagateway && s->filesize == 2000000000) h->is_streamed = 1; /* we can in fact _not_ seek */ cookie_string(s->cookie_dict, &s->cookies); av_dict_free(&s->cookie_dict); return err; } Commit Message: http: make length/offset-related variables unsigned. Fixes #5992, reported and found by Paul Cher <[email protected]>. CWE ID: CWE-119
static int http_read_header(URLContext *h, int *new_location) { HTTPContext *s = h->priv_data; char line[MAX_URL_SIZE]; int err = 0; s->chunksize = UINT64_MAX; for (;;) { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; av_log(h, AV_LOG_TRACE, "header='%s'\n", line); err = process_line(h, line, s->line_count, new_location); if (err < 0) return err; if (err == 0) break; s->line_count++; } if (s->seekable == -1 && s->is_mediagateway && s->filesize == 2000000000) h->is_streamed = 1; /* we can in fact _not_ seek */ cookie_string(s->cookie_dict, &s->cookies); av_dict_free(&s->cookie_dict); return err; }
168,500
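The only substantive edit here is the sentinel: s->chunksize moves from -1 in a signed field to UINT64_MAX in an unsigned one, following the commit's switch to unsigned length and offset variables. The hazard being removed is the usual signed/unsigned mix-up, where a -1 sentinel silently becomes a huge value inside size comparisons. A standalone illustration of the two styles (plain C, nothing here is FFmpeg API):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t  chunk_signed   = -1;          /* old style: -1 means "size not known yet" */
        uint64_t chunk_unsigned = UINT64_MAX;  /* fixed style: explicit unsigned sentinel  */
        uint64_t requested      = 4096;

        /* Once the signed sentinel enters unsigned arithmetic it turns into
         * 18446744073709551615, so any limit logic written around it changes
         * meaning without an error (compilers warn at best). */
        if (requested < (uint64_t)chunk_signed)
            printf("-1 sentinel compares as %llu\n",
                   (unsigned long long)(uint64_t)chunk_signed);

        /* With the field declared unsigned, "unlimited" is tested by name and
         * every comparison stays in a single value domain. */
        if (chunk_unsigned == UINT64_MAX)
            printf("chunksize unset, treated as unlimited\n");

        return 0;
    }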
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, struct user_namespace *user_ns, struct fs_struct *new_fs) { struct mnt_namespace *new_ns; struct vfsmount *rootmnt = NULL, *pwdmnt = NULL; struct mount *p, *q; struct mount *old; struct mount *new; int copy_flags; BUG_ON(!ns); if (likely(!(flags & CLONE_NEWNS))) { get_mnt_ns(ns); return ns; } old = ns->root; new_ns = alloc_mnt_ns(user_ns); if (IS_ERR(new_ns)) return new_ns; namespace_lock(); /* First pass: copy the tree topology */ copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE; if (user_ns != ns->user_ns) copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED; new = copy_tree(old, old->mnt.mnt_root, copy_flags); if (IS_ERR(new)) { namespace_unlock(); free_mnt_ns(new_ns); return ERR_CAST(new); } new_ns->root = new; list_add_tail(&new_ns->list, &new->mnt_list); /* * Second pass: switch the tsk->fs->* elements and mark new vfsmounts * as belonging to new namespace. We have already acquired a private * fs_struct, so tsk->fs->lock is not needed. */ p = old; q = new; while (p) { q->mnt_ns = new_ns; if (new_fs) { if (&p->mnt == new_fs->root.mnt) { new_fs->root.mnt = mntget(&q->mnt); rootmnt = &p->mnt; } if (&p->mnt == new_fs->pwd.mnt) { new_fs->pwd.mnt = mntget(&q->mnt); pwdmnt = &p->mnt; } } p = next_mnt(p, old); q = next_mnt(q, new); if (!q) break; while (p->mnt.mnt_root != q->mnt.mnt_root) p = next_mnt(p, old); } namespace_unlock(); if (rootmnt) mntput(rootmnt); if (pwdmnt) mntput(pwdmnt); return new_ns; } Commit Message: mnt: Add a per mount namespace limit on the number of mounts CAI Qian <[email protected]> pointed out that the semantics of shared subtrees make it possible to create an exponentially increasing number of mounts in a mount namespace. mkdir /tmp/1 /tmp/2 mount --make-rshared / for i in $(seq 1 20) ; do mount --bind /tmp/1 /tmp/2 ; done Will create create 2^20 or 1048576 mounts, which is a practical problem as some people have managed to hit this by accident. As such CVE-2016-6213 was assigned. Ian Kent <[email protected]> described the situation for autofs users as follows: > The number of mounts for direct mount maps is usually not very large because of > the way they are implemented, large direct mount maps can have performance > problems. There can be anywhere from a few (likely case a few hundred) to less > than 10000, plus mounts that have been triggered and not yet expired. > > Indirect mounts have one autofs mount at the root plus the number of mounts that > have been triggered and not yet expired. > > The number of autofs indirect map entries can range from a few to the common > case of several thousand and in rare cases up to between 30000 and 50000. I've > not heard of people with maps larger than 50000 entries. > > The larger the number of map entries the greater the possibility for a large > number of active mounts so it's not hard to expect cases of a 1000 or somewhat > more active mounts. So I am setting the default number of mounts allowed per mount namespace at 100,000. This is more than enough for any use case I know of, but small enough to quickly stop an exponential increase in mounts. Which should be perfect to catch misconfigurations and malfunctioning programs. For anyone who needs a higher limit this can be changed by writing to the new /proc/sys/fs/mount-max sysctl. Tested-by: CAI Qian <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]> CWE ID: CWE-400
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, struct user_namespace *user_ns, struct fs_struct *new_fs) { struct mnt_namespace *new_ns; struct vfsmount *rootmnt = NULL, *pwdmnt = NULL; struct mount *p, *q; struct mount *old; struct mount *new; int copy_flags; BUG_ON(!ns); if (likely(!(flags & CLONE_NEWNS))) { get_mnt_ns(ns); return ns; } old = ns->root; new_ns = alloc_mnt_ns(user_ns); if (IS_ERR(new_ns)) return new_ns; namespace_lock(); /* First pass: copy the tree topology */ copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE; if (user_ns != ns->user_ns) copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED; new = copy_tree(old, old->mnt.mnt_root, copy_flags); if (IS_ERR(new)) { namespace_unlock(); free_mnt_ns(new_ns); return ERR_CAST(new); } new_ns->root = new; list_add_tail(&new_ns->list, &new->mnt_list); /* * Second pass: switch the tsk->fs->* elements and mark new vfsmounts * as belonging to new namespace. We have already acquired a private * fs_struct, so tsk->fs->lock is not needed. */ p = old; q = new; while (p) { q->mnt_ns = new_ns; new_ns->mounts++; if (new_fs) { if (&p->mnt == new_fs->root.mnt) { new_fs->root.mnt = mntget(&q->mnt); rootmnt = &p->mnt; } if (&p->mnt == new_fs->pwd.mnt) { new_fs->pwd.mnt = mntget(&q->mnt); pwdmnt = &p->mnt; } } p = next_mnt(p, old); q = next_mnt(q, new); if (!q) break; while (p->mnt.mnt_root != q->mnt.mnt_root) p = next_mnt(p, old); } namespace_unlock(); if (rootmnt) mntput(rootmnt); if (pwdmnt) mntput(pwdmnt); return new_ns; }
167,009
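The diff visible above is only the accounting side, new_ns->mounts++ while cloning the tree; the limit the commit message describes is enforced when mounts are attached, against a per-namespace ceiling that defaults to 100,000 and is tunable through /proc/sys/fs/mount-max. A fragment sketching how such accounting and a cap fit together (kernel-context code; apart from the mounts field seen above, the names are assumptions, not quotes from the patch):

    struct mnt_namespace {
            /* ... existing fields ... */
            unsigned int mounts;      /* mounts currently charged to this namespace */
            unsigned int max_mounts;  /* per-namespace ceiling, sysctl-tunable      */
    };

    /* Called before attaching 'to_add' new mounts: refuse the operation when
     * it would push the namespace past its cap, otherwise charge them. */
    static int charge_mounts(struct mnt_namespace *ns, unsigned int to_add)
    {
            if (to_add > ns->max_mounts - ns->mounts)
                    return -ENOSPC;
            ns->mounts += to_add;
            return 0;
    }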
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) { int status = 0, size_change; int inode_locked = 0; struct inode *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; struct ocfs2_super *osb = OCFS2_SB(sb); struct buffer_head *bh = NULL; handle_t *handle = NULL; struct dquot *transfer_to[MAXQUOTAS] = { }; int qtype; int had_lock; struct ocfs2_lock_holder oh; trace_ocfs2_setattr(inode, dentry, (unsigned long long)OCFS2_I(inode)->ip_blkno, dentry->d_name.len, dentry->d_name.name, attr->ia_valid, attr->ia_mode, from_kuid(&init_user_ns, attr->ia_uid), from_kgid(&init_user_ns, attr->ia_gid)); /* ensuring we don't even attempt to truncate a symlink */ if (S_ISLNK(inode->i_mode)) attr->ia_valid &= ~ATTR_SIZE; #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \ | ATTR_GID | ATTR_UID | ATTR_MODE) if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) return 0; status = setattr_prepare(dentry, attr); if (status) return status; if (is_quota_modification(inode, attr)) { status = dquot_initialize(inode); if (status) return status; } size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; if (size_change) { status = ocfs2_rw_lock(inode, 1); if (status < 0) { mlog_errno(status); goto bail; } } had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh); if (had_lock < 0) { status = had_lock; goto bail_unlock_rw; } else if (had_lock) { /* * As far as we know, ocfs2_setattr() could only be the first * VFS entry point in the call chain of recursive cluster * locking issue. * * For instance: * chmod_common() * notify_change() * ocfs2_setattr() * posix_acl_chmod() * ocfs2_iop_get_acl() * * But, we're not 100% sure if it's always true, because the * ordering of the VFS entry points in the call chain is out * of our control. So, we'd better dump the stack here to * catch the other cases of recursive locking. 
*/ mlog(ML_ERROR, "Another case of recursive locking:\n"); dump_stack(); } inode_locked = 1; if (size_change) { status = inode_newsize_ok(inode, attr->ia_size); if (status) goto bail_unlock; inode_dio_wait(inode); if (i_size_read(inode) >= attr->ia_size) { if (ocfs2_should_order_data(inode)) { status = ocfs2_begin_ordered_truncate(inode, attr->ia_size); if (status) goto bail_unlock; } status = ocfs2_truncate_file(inode, bh, attr->ia_size); } else status = ocfs2_extend_file(inode, bh, attr->ia_size); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); status = -ENOSPC; goto bail_unlock; } } if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { /* * Gather pointers to quota structures so that allocation / * freeing of quota structures happens here and not inside * dquot_transfer() where we have problems with lock ordering */ if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid) && OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid)); if (IS_ERR(transfer_to[USRQUOTA])) { status = PTR_ERR(transfer_to[USRQUOTA]); goto bail_unlock; } } if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid) && OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid)); if (IS_ERR(transfer_to[GRPQUOTA])) { status = PTR_ERR(transfer_to[GRPQUOTA]); goto bail_unlock; } } handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS + 2 * ocfs2_quota_trans_credits(sb)); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto bail_unlock; } status = __dquot_transfer(inode, transfer_to); if (status < 0) goto bail_commit; } else { handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto bail_unlock; } } setattr_copy(inode, attr); mark_inode_dirty(inode); status = ocfs2_mark_inode_dirty(handle, inode, bh); if (status < 0) mlog_errno(status); bail_commit: ocfs2_commit_trans(osb, handle); bail_unlock: if (status && inode_locked) { ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); inode_locked = 0; } bail_unlock_rw: if (size_change) ocfs2_rw_unlock(inode, 1); bail: /* Release quota pointers in case we acquired them */ for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++) dqput(transfer_to[qtype]); if (!status && attr->ia_valid & ATTR_MODE) { status = ocfs2_acl_chmod(inode, bh); if (status < 0) mlog_errno(status); } if (inode_locked) ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); brelse(bh); return status; } Commit Message: ocfs2: should wait dio before inode lock in ocfs2_setattr() we should wait dio requests to finish before inode lock in ocfs2_setattr(), otherwise the following deadlock will happen: process 1 process 2 process 3 truncate file 'A' end_io of writing file 'A' receiving the bast messages ocfs2_setattr ocfs2_inode_lock_tracker ocfs2_inode_lock_full inode_dio_wait __inode_dio_wait -->waiting for all dio requests finish dlm_proxy_ast_handler dlm_do_local_bast ocfs2_blocking_ast ocfs2_generic_handle_bast set OCFS2_LOCK_BLOCKED flag dio_end_io dio_bio_end_aio dio_complete ocfs2_dio_end_io ocfs2_dio_end_io_write ocfs2_inode_lock __ocfs2_cluster_lock ocfs2_wait_for_mask -->waiting for OCFS2_LOCK_BLOCKED flag to be cleared, that is waiting for 'process 1' unlocking the inode lock inode_dio_end -->here dec the i_dio_count, but will never be called, 
so a deadlock happened. Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Alex Chen <[email protected]> Reviewed-by: Jun Piao <[email protected]> Reviewed-by: Joseph Qi <[email protected]> Acked-by: Changwei Ge <[email protected]> Cc: Mark Fasheh <[email protected]> Cc: Joel Becker <[email protected]> Cc: Junxiao Bi <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID:
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) { int status = 0, size_change; int inode_locked = 0; struct inode *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; struct ocfs2_super *osb = OCFS2_SB(sb); struct buffer_head *bh = NULL; handle_t *handle = NULL; struct dquot *transfer_to[MAXQUOTAS] = { }; int qtype; int had_lock; struct ocfs2_lock_holder oh; trace_ocfs2_setattr(inode, dentry, (unsigned long long)OCFS2_I(inode)->ip_blkno, dentry->d_name.len, dentry->d_name.name, attr->ia_valid, attr->ia_mode, from_kuid(&init_user_ns, attr->ia_uid), from_kgid(&init_user_ns, attr->ia_gid)); /* ensuring we don't even attempt to truncate a symlink */ if (S_ISLNK(inode->i_mode)) attr->ia_valid &= ~ATTR_SIZE; #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \ | ATTR_GID | ATTR_UID | ATTR_MODE) if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) return 0; status = setattr_prepare(dentry, attr); if (status) return status; if (is_quota_modification(inode, attr)) { status = dquot_initialize(inode); if (status) return status; } size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; if (size_change) { /* * Here we should wait dio to finish before inode lock * to avoid a deadlock between ocfs2_setattr() and * ocfs2_dio_end_io_write() */ inode_dio_wait(inode); status = ocfs2_rw_lock(inode, 1); if (status < 0) { mlog_errno(status); goto bail; } } had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh); if (had_lock < 0) { status = had_lock; goto bail_unlock_rw; } else if (had_lock) { /* * As far as we know, ocfs2_setattr() could only be the first * VFS entry point in the call chain of recursive cluster * locking issue. * * For instance: * chmod_common() * notify_change() * ocfs2_setattr() * posix_acl_chmod() * ocfs2_iop_get_acl() * * But, we're not 100% sure if it's always true, because the * ordering of the VFS entry points in the call chain is out * of our control. So, we'd better dump the stack here to * catch the other cases of recursive locking. 
*/ mlog(ML_ERROR, "Another case of recursive locking:\n"); dump_stack(); } inode_locked = 1; if (size_change) { status = inode_newsize_ok(inode, attr->ia_size); if (status) goto bail_unlock; if (i_size_read(inode) >= attr->ia_size) { if (ocfs2_should_order_data(inode)) { status = ocfs2_begin_ordered_truncate(inode, attr->ia_size); if (status) goto bail_unlock; } status = ocfs2_truncate_file(inode, bh, attr->ia_size); } else status = ocfs2_extend_file(inode, bh, attr->ia_size); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); status = -ENOSPC; goto bail_unlock; } } if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { /* * Gather pointers to quota structures so that allocation / * freeing of quota structures happens here and not inside * dquot_transfer() where we have problems with lock ordering */ if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid) && OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid)); if (IS_ERR(transfer_to[USRQUOTA])) { status = PTR_ERR(transfer_to[USRQUOTA]); goto bail_unlock; } } if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid) && OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid)); if (IS_ERR(transfer_to[GRPQUOTA])) { status = PTR_ERR(transfer_to[GRPQUOTA]); goto bail_unlock; } } handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS + 2 * ocfs2_quota_trans_credits(sb)); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto bail_unlock; } status = __dquot_transfer(inode, transfer_to); if (status < 0) goto bail_commit; } else { handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto bail_unlock; } } setattr_copy(inode, attr); mark_inode_dirty(inode); status = ocfs2_mark_inode_dirty(handle, inode, bh); if (status < 0) mlog_errno(status); bail_commit: ocfs2_commit_trans(osb, handle); bail_unlock: if (status && inode_locked) { ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); inode_locked = 0; } bail_unlock_rw: if (size_change) ocfs2_rw_unlock(inode, 1); bail: /* Release quota pointers in case we acquired them */ for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++) dqput(transfer_to[qtype]); if (!status && attr->ia_valid & ATTR_MODE) { status = ocfs2_acl_chmod(inode, bh); if (status < 0) mlog_errno(status); } if (inode_locked) ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); brelse(bh); return status; }
169,410
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: enum ImapAuthRes imap_auth_cram_md5(struct ImapData *idata, const char *method) { char ibuf[LONG_STRING * 2], obuf[LONG_STRING]; unsigned char hmac_response[MD5_DIGEST_LEN]; int len; int rc; if (!mutt_bit_isset(idata->capabilities, ACRAM_MD5)) return IMAP_AUTH_UNAVAIL; mutt_message(_("Authenticating (CRAM-MD5)...")); /* get auth info */ if (mutt_account_getlogin(&idata->conn->account) < 0) return IMAP_AUTH_FAILURE; if (mutt_account_getpass(&idata->conn->account) < 0) return IMAP_AUTH_FAILURE; imap_cmd_start(idata, "AUTHENTICATE CRAM-MD5"); /* From RFC2195: * The data encoded in the first ready response contains a presumptively * arbitrary string of random digits, a timestamp, and the fully-qualified * primary host name of the server. The syntax of the unencoded form must * correspond to that of an RFC822 'msg-id' [RFC822] as described in [POP3]. */ do rc = imap_cmd_step(idata); while (rc == IMAP_CMD_CONTINUE); if (rc != IMAP_CMD_RESPOND) { mutt_debug(1, "Invalid response from server: %s\n", ibuf); goto bail; } len = mutt_b64_decode(obuf, idata->buf + 2); if (len == -1) { mutt_debug(1, "Error decoding base64 response.\n"); goto bail; } obuf[len] = '\0'; mutt_debug(2, "CRAM challenge: %s\n", obuf); /* The client makes note of the data and then responds with a string * consisting of the user name, a space, and a 'digest'. The latter is * computed by applying the keyed MD5 algorithm from [KEYED-MD5] where the * key is a shared secret and the digested text is the timestamp (including * angle-brackets). * * Note: The user name shouldn't be quoted. Since the digest can't contain * spaces, there is no ambiguity. Some servers get this wrong, we'll work * around them when the bug report comes in. Until then, we'll remain * blissfully RFC-compliant. */ hmac_md5(idata->conn->account.pass, obuf, hmac_response); /* dubious optimisation I saw elsewhere: make the whole string in one call */ int off = snprintf(obuf, sizeof(obuf), "%s ", idata->conn->account.user); mutt_md5_toascii(hmac_response, obuf + off); mutt_debug(2, "CRAM response: %s\n", obuf); /* ibuf must be long enough to store the base64 encoding of obuf, * plus the additional debris */ mutt_b64_encode(ibuf, obuf, strlen(obuf), sizeof(ibuf) - 2); mutt_str_strcat(ibuf, sizeof(ibuf), "\r\n"); mutt_socket_send(idata->conn, ibuf); do rc = imap_cmd_step(idata); while (rc == IMAP_CMD_CONTINUE); if (rc != IMAP_CMD_OK) { mutt_debug(1, "Error receiving server response.\n"); goto bail; } if (imap_code(idata->buf)) return IMAP_AUTH_SUCCESS; bail: mutt_error(_("CRAM-MD5 authentication failed.")); return IMAP_AUTH_FAILURE; } Commit Message: Check outbuf length in mutt_to_base64() The obuf can be overflowed in auth_cram.c, and possibly auth_gss.c. Thanks to Jeriko One for the bug report. CWE ID: CWE-119
enum ImapAuthRes imap_auth_cram_md5(struct ImapData *idata, const char *method) { char ibuf[LONG_STRING * 2], obuf[LONG_STRING]; unsigned char hmac_response[MD5_DIGEST_LEN]; int len; int rc; if (!mutt_bit_isset(idata->capabilities, ACRAM_MD5)) return IMAP_AUTH_UNAVAIL; mutt_message(_("Authenticating (CRAM-MD5)...")); /* get auth info */ if (mutt_account_getlogin(&idata->conn->account) < 0) return IMAP_AUTH_FAILURE; if (mutt_account_getpass(&idata->conn->account) < 0) return IMAP_AUTH_FAILURE; imap_cmd_start(idata, "AUTHENTICATE CRAM-MD5"); /* From RFC2195: * The data encoded in the first ready response contains a presumptively * arbitrary string of random digits, a timestamp, and the fully-qualified * primary host name of the server. The syntax of the unencoded form must * correspond to that of an RFC822 'msg-id' [RFC822] as described in [POP3]. */ do rc = imap_cmd_step(idata); while (rc == IMAP_CMD_CONTINUE); if (rc != IMAP_CMD_RESPOND) { mutt_debug(1, "Invalid response from server: %s\n", ibuf); goto bail; } len = mutt_b64_decode(obuf, idata->buf + 2, sizeof(obuf)); if (len == -1) { mutt_debug(1, "Error decoding base64 response.\n"); goto bail; } obuf[len] = '\0'; mutt_debug(2, "CRAM challenge: %s\n", obuf); /* The client makes note of the data and then responds with a string * consisting of the user name, a space, and a 'digest'. The latter is * computed by applying the keyed MD5 algorithm from [KEYED-MD5] where the * key is a shared secret and the digested text is the timestamp (including * angle-brackets). * * Note: The user name shouldn't be quoted. Since the digest can't contain * spaces, there is no ambiguity. Some servers get this wrong, we'll work * around them when the bug report comes in. Until then, we'll remain * blissfully RFC-compliant. */ hmac_md5(idata->conn->account.pass, obuf, hmac_response); /* dubious optimisation I saw elsewhere: make the whole string in one call */ int off = snprintf(obuf, sizeof(obuf), "%s ", idata->conn->account.user); mutt_md5_toascii(hmac_response, obuf + off); mutt_debug(2, "CRAM response: %s\n", obuf); /* ibuf must be long enough to store the base64 encoding of obuf, * plus the additional debris */ mutt_b64_encode(ibuf, obuf, strlen(obuf), sizeof(ibuf) - 2); mutt_str_strcat(ibuf, sizeof(ibuf), "\r\n"); mutt_socket_send(idata->conn, ibuf); do rc = imap_cmd_step(idata); while (rc == IMAP_CMD_CONTINUE); if (rc != IMAP_CMD_OK) { mutt_debug(1, "Error receiving server response.\n"); goto bail; } if (imap_code(idata->buf)) return IMAP_AUTH_SUCCESS; bail: mutt_error(_("CRAM-MD5 authentication failed.")); return IMAP_AUTH_FAILURE; }
169,126
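The change here is that mutt_b64_decode() now receives sizeof(obuf) as a third argument, so the decoder can refuse input whose decoded form would not fit, instead of writing past obuf on a long server challenge. A standalone sketch of that length contract (only the bounds check is shown; the base64 byte translation is elided and this is not mutt's real implementation):

    #include <stdio.h>
    #include <string.h>

    /* Bounds-aware decode interface: the caller passes the capacity of 'out'
     * and gets an error instead of an overflow when the payload is too large. */
    static int b64_decode_checked(char *out, size_t outlen, const char *in)
    {
        size_t inlen = strlen(in);
        size_t need  = (inlen / 4) * 3 + 1;   /* worst-case decoded bytes plus NUL */

        if (need > outlen)
            return -1;                        /* caller treats this as a failed AUTH */

        /* ... real base64 translation into 'out' would go here ... */
        out[0] = '\0';
        return 0;
    }

    int main(void)
    {
        char small[8];
        const char *challenge = "c29tZS1sb25nLXNlcnZlci1jaGFsbGVuZ2UtdG9rZW4=";

        if (b64_decode_checked(small, sizeof(small), challenge) < 0)
            printf("decode refused: destination buffer too small\n");
        return 0;
    }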
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); unsigned int copied, rlen; struct sk_buff *skb, *rskb, *cskb; int err = 0; u32 offset; msg->msg_namelen = 0; if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) && list_empty(&iucv->message_q.list)) return 0; if (flags & (MSG_OOB)) return -EOPNOTSUPP; /* receive/dequeue next skb: * the function understands MSG_PEEK and, thus, does not dequeue skb */ skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) return 0; return err; } offset = IUCV_SKB_CB(skb)->offset; rlen = skb->len - offset; /* real length of skb */ copied = min_t(unsigned int, rlen, len); if (!rlen) sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; cskb = skb; if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return -EFAULT; } /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */ if (sk->sk_type == SOCK_SEQPACKET) { if (copied < rlen) msg->msg_flags |= MSG_TRUNC; /* each iucv message contains a complete record */ msg->msg_flags |= MSG_EOR; } /* create control message to store iucv msg target class: * get the trgcls from the control buffer of the skb due to * fragmentation of original iucv message. */ err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, sizeof(IUCV_SKB_CB(skb)->class), (void *)&IUCV_SKB_CB(skb)->class); if (err) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return err; } /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { /* SOCK_STREAM: re-queue skb if it contains unreceived data */ if (sk->sk_type == SOCK_STREAM) { if (copied < rlen) { IUCV_SKB_CB(skb)->offset = offset + copied; goto done; } } kfree_skb(skb); if (iucv->transport == AF_IUCV_TRANS_HIPER) { atomic_inc(&iucv->msg_recv); if (atomic_read(&iucv->msg_recv) > iucv->msglimit) { WARN_ON(1); iucv_sock_close(sk); return -EFAULT; } } /* Queue backlog skbs */ spin_lock_bh(&iucv->message_q.lock); rskb = skb_dequeue(&iucv->backlog_skb_q); while (rskb) { IUCV_SKB_CB(rskb)->offset = 0; if (sock_queue_rcv_skb(sk, rskb)) { skb_queue_head(&iucv->backlog_skb_q, rskb); break; } else { rskb = skb_dequeue(&iucv->backlog_skb_q); } } if (skb_queue_empty(&iucv->backlog_skb_q)) { if (!list_empty(&iucv->message_q.list)) iucv_process_message_q(sk); if (atomic_read(&iucv->msg_recv) >= iucv->msglimit / 2) { err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); if (err) { sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); } } } spin_unlock_bh(&iucv->message_q.lock); } done: /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) copied = rlen; return copied; } Commit Message: net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. 
We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-20
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); unsigned int copied, rlen; struct sk_buff *skb, *rskb, *cskb; int err = 0; u32 offset; if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) && list_empty(&iucv->message_q.list)) return 0; if (flags & (MSG_OOB)) return -EOPNOTSUPP; /* receive/dequeue next skb: * the function understands MSG_PEEK and, thus, does not dequeue skb */ skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) return 0; return err; } offset = IUCV_SKB_CB(skb)->offset; rlen = skb->len - offset; /* real length of skb */ copied = min_t(unsigned int, rlen, len); if (!rlen) sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; cskb = skb; if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return -EFAULT; } /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */ if (sk->sk_type == SOCK_SEQPACKET) { if (copied < rlen) msg->msg_flags |= MSG_TRUNC; /* each iucv message contains a complete record */ msg->msg_flags |= MSG_EOR; } /* create control message to store iucv msg target class: * get the trgcls from the control buffer of the skb due to * fragmentation of original iucv message. */ err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, sizeof(IUCV_SKB_CB(skb)->class), (void *)&IUCV_SKB_CB(skb)->class); if (err) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return err; } /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { /* SOCK_STREAM: re-queue skb if it contains unreceived data */ if (sk->sk_type == SOCK_STREAM) { if (copied < rlen) { IUCV_SKB_CB(skb)->offset = offset + copied; goto done; } } kfree_skb(skb); if (iucv->transport == AF_IUCV_TRANS_HIPER) { atomic_inc(&iucv->msg_recv); if (atomic_read(&iucv->msg_recv) > iucv->msglimit) { WARN_ON(1); iucv_sock_close(sk); return -EFAULT; } } /* Queue backlog skbs */ spin_lock_bh(&iucv->message_q.lock); rskb = skb_dequeue(&iucv->backlog_skb_q); while (rskb) { IUCV_SKB_CB(rskb)->offset = 0; if (sock_queue_rcv_skb(sk, rskb)) { skb_queue_head(&iucv->backlog_skb_q, rskb); break; } else { rskb = skb_dequeue(&iucv->backlog_skb_q); } } if (skb_queue_empty(&iucv->backlog_skb_q)) { if (!list_empty(&iucv->message_q.list)) iucv_process_message_q(sk); if (atomic_read(&iucv->msg_recv) >= iucv->msglimit / 2) { err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN); if (err) { sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); } } } spin_unlock_bh(&iucv->message_q.lock); } done: /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) copied = rlen; return copied; }
166,503
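Here the fix is a deletion: iucv_sock_recvmsg() stops zeroing msg->msg_namelen itself, because after the rework described in the commit message the socket core passes msg_namelen in as 0 and only handlers that actually fill msg_name are supposed to set it. A fragment showing that convention for a handler that does return an address (kernel-context code of the same era as the function above; the handler itself is hypothetical, though DECLARE_SOCKADDR is the real helper used for this):

    static int example_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
                                     struct msghdr *msg, size_t len, int flags)
    {
            DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);

            /* ... dequeue a packet and copy it into msg->msg_iov as usual ... */

            if (sin) {                         /* caller supplied an address buffer */
                    sin->sin_family      = AF_INET;
                    sin->sin_port        = 0;  /* taken from the packet in real code */
                    sin->sin_addr.s_addr = 0;
                    memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
                    msg->msg_namelen = sizeof(*sin);
            }
            /* No "else msg->msg_namelen = 0;" and no unconditional zeroing at
             * the top: the core already initialised msg_namelen before the call. */
            return 0;                          /* a real handler returns the copied length */
    }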
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: virtual void SetUp() { vp9_worker_init(&worker_); } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
virtual void SetUp() { vpx_get_worker_interface()->init(&worker_); }
174,599
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: IHEVCD_ERROR_T ihevcd_parse_sps(codec_t *ps_codec) { IHEVCD_ERROR_T ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; WORD32 value; WORD32 i; WORD32 vps_id; WORD32 sps_max_sub_layers; WORD32 sps_id; WORD32 sps_temporal_id_nesting_flag; sps_t *ps_sps; profile_tier_lvl_info_t s_ptl; bitstrm_t *ps_bitstrm = &ps_codec->s_parse.s_bitstrm; BITS_PARSE("video_parameter_set_id", value, ps_bitstrm, 4); vps_id = value; vps_id = CLIP3(vps_id, 0, MAX_VPS_CNT - 1); BITS_PARSE("sps_max_sub_layers_minus1", value, ps_bitstrm, 3); sps_max_sub_layers = value + 1; sps_max_sub_layers = CLIP3(sps_max_sub_layers, 1, 7); BITS_PARSE("sps_temporal_id_nesting_flag", value, ps_bitstrm, 1); sps_temporal_id_nesting_flag = value; ret = ihevcd_profile_tier_level(ps_bitstrm, &(s_ptl), 1, (sps_max_sub_layers - 1)); UEV_PARSE("seq_parameter_set_id", value, ps_bitstrm); sps_id = value; if((sps_id >= MAX_SPS_CNT) || (sps_id < 0)) { if(ps_codec->i4_sps_done) return IHEVCD_UNSUPPORTED_SPS_ID; else sps_id = 0; } ps_sps = (ps_codec->s_parse.ps_sps_base + MAX_SPS_CNT - 1); ps_sps->i1_sps_id = sps_id; ps_sps->i1_vps_id = vps_id; ps_sps->i1_sps_max_sub_layers = sps_max_sub_layers; ps_sps->i1_sps_temporal_id_nesting_flag = sps_temporal_id_nesting_flag; /* This is used only during initialization to get reorder count etc */ ps_codec->i4_sps_id = sps_id; memcpy(&ps_sps->s_ptl, &s_ptl, sizeof(profile_tier_lvl_info_t)); UEV_PARSE("chroma_format_idc", value, ps_bitstrm); ps_sps->i1_chroma_format_idc = value; if(ps_sps->i1_chroma_format_idc != CHROMA_FMT_IDC_YUV420) { ps_codec->s_parse.i4_error_code = IHEVCD_UNSUPPORTED_CHROMA_FMT_IDC; return (IHEVCD_ERROR_T)IHEVCD_UNSUPPORTED_CHROMA_FMT_IDC; } if(CHROMA_FMT_IDC_YUV444_PLANES == ps_sps->i1_chroma_format_idc) { BITS_PARSE("separate_colour_plane_flag", value, ps_bitstrm, 1); ps_sps->i1_separate_colour_plane_flag = value; } else { ps_sps->i1_separate_colour_plane_flag = 0; } UEV_PARSE("pic_width_in_luma_samples", value, ps_bitstrm); ps_sps->i2_pic_width_in_luma_samples = value; UEV_PARSE("pic_height_in_luma_samples", value, ps_bitstrm); ps_sps->i2_pic_height_in_luma_samples = value; if((0 >= ps_sps->i2_pic_width_in_luma_samples) || (0 >= ps_sps->i2_pic_height_in_luma_samples)) return IHEVCD_INVALID_PARAMETER; /* i2_pic_width_in_luma_samples and i2_pic_height_in_luma_samples should be multiples of min_cb_size. Here these are aligned to 8, i.e. 
smallest CB size */ ps_sps->i2_pic_width_in_luma_samples = ALIGN8(ps_sps->i2_pic_width_in_luma_samples); ps_sps->i2_pic_height_in_luma_samples = ALIGN8(ps_sps->i2_pic_height_in_luma_samples); if((ps_sps->i2_pic_width_in_luma_samples > ps_codec->i4_max_wd) || (ps_sps->i2_pic_width_in_luma_samples * ps_sps->i2_pic_height_in_luma_samples > ps_codec->i4_max_wd * ps_codec->i4_max_ht) || (ps_sps->i2_pic_height_in_luma_samples > MAX(ps_codec->i4_max_wd, ps_codec->i4_max_ht))) { ps_codec->i4_new_max_wd = ps_sps->i2_pic_width_in_luma_samples; ps_codec->i4_new_max_ht = ps_sps->i2_pic_height_in_luma_samples; return (IHEVCD_ERROR_T)IHEVCD_UNSUPPORTED_DIMENSIONS; } BITS_PARSE("pic_cropping_flag", value, ps_bitstrm, 1); ps_sps->i1_pic_cropping_flag = value; if(ps_sps->i1_pic_cropping_flag) { UEV_PARSE("pic_crop_left_offset", value, ps_bitstrm); ps_sps->i2_pic_crop_left_offset = value; UEV_PARSE("pic_crop_right_offset", value, ps_bitstrm); ps_sps->i2_pic_crop_right_offset = value; UEV_PARSE("pic_crop_top_offset", value, ps_bitstrm); ps_sps->i2_pic_crop_top_offset = value; UEV_PARSE("pic_crop_bottom_offset", value, ps_bitstrm); ps_sps->i2_pic_crop_bottom_offset = value; } else { ps_sps->i2_pic_crop_left_offset = 0; ps_sps->i2_pic_crop_right_offset = 0; ps_sps->i2_pic_crop_top_offset = 0; ps_sps->i2_pic_crop_bottom_offset = 0; } UEV_PARSE("bit_depth_luma_minus8", value, ps_bitstrm); if(0 != value) return IHEVCD_UNSUPPORTED_BIT_DEPTH; UEV_PARSE("bit_depth_chroma_minus8", value, ps_bitstrm); if(0 != value) return IHEVCD_UNSUPPORTED_BIT_DEPTH; UEV_PARSE("log2_max_pic_order_cnt_lsb_minus4", value, ps_bitstrm); ps_sps->i1_log2_max_pic_order_cnt_lsb = value + 4; BITS_PARSE("sps_sub_layer_ordering_info_present_flag", value, ps_bitstrm, 1); ps_sps->i1_sps_sub_layer_ordering_info_present_flag = value; i = (ps_sps->i1_sps_sub_layer_ordering_info_present_flag ? 
0 : (ps_sps->i1_sps_max_sub_layers - 1)); for(; i < ps_sps->i1_sps_max_sub_layers; i++) { UEV_PARSE("max_dec_pic_buffering", value, ps_bitstrm); ps_sps->ai1_sps_max_dec_pic_buffering[i] = value + 1; UEV_PARSE("num_reorder_pics", value, ps_bitstrm); ps_sps->ai1_sps_max_num_reorder_pics[i] = value; UEV_PARSE("max_latency_increase", value, ps_bitstrm); ps_sps->ai1_sps_max_latency_increase[i] = value; } UEV_PARSE("log2_min_coding_block_size_minus3", value, ps_bitstrm); ps_sps->i1_log2_min_coding_block_size = value + 3; UEV_PARSE("log2_diff_max_min_coding_block_size", value, ps_bitstrm); ps_sps->i1_log2_diff_max_min_coding_block_size = value; UEV_PARSE("log2_min_transform_block_size_minus2", value, ps_bitstrm); ps_sps->i1_log2_min_transform_block_size = value + 2; UEV_PARSE("log2_diff_max_min_transform_block_size", value, ps_bitstrm); ps_sps->i1_log2_diff_max_min_transform_block_size = value; ps_sps->i1_log2_max_transform_block_size = ps_sps->i1_log2_min_transform_block_size + ps_sps->i1_log2_diff_max_min_transform_block_size; ps_sps->i1_log2_ctb_size = ps_sps->i1_log2_min_coding_block_size + ps_sps->i1_log2_diff_max_min_coding_block_size; if((ps_sps->i1_log2_min_coding_block_size < 3) || (ps_sps->i1_log2_min_transform_block_size < 2) || (ps_sps->i1_log2_diff_max_min_transform_block_size < 0) || (ps_sps->i1_log2_max_transform_block_size > ps_sps->i1_log2_ctb_size) || (ps_sps->i1_log2_ctb_size < 4) || (ps_sps->i1_log2_ctb_size > 6)) { return IHEVCD_INVALID_PARAMETER; } ps_sps->i1_log2_min_pcm_coding_block_size = 0; ps_sps->i1_log2_diff_max_min_pcm_coding_block_size = 0; UEV_PARSE("max_transform_hierarchy_depth_inter", value, ps_bitstrm); ps_sps->i1_max_transform_hierarchy_depth_inter = value; UEV_PARSE("max_transform_hierarchy_depth_intra", value, ps_bitstrm); ps_sps->i1_max_transform_hierarchy_depth_intra = value; /* String has a d (enabled) in order to match with HM */ BITS_PARSE("scaling_list_enabled_flag", value, ps_bitstrm, 1); ps_sps->i1_scaling_list_enable_flag = value; if(ps_sps->i1_scaling_list_enable_flag) { COPY_DEFAULT_SCALING_LIST(ps_sps->pi2_scaling_mat); BITS_PARSE("sps_scaling_list_data_present_flag", value, ps_bitstrm, 1); ps_sps->i1_sps_scaling_list_data_present_flag = value; if(ps_sps->i1_sps_scaling_list_data_present_flag) ihevcd_scaling_list_data(ps_codec, ps_sps->pi2_scaling_mat); } else { COPY_FLAT_SCALING_LIST(ps_sps->pi2_scaling_mat); } /* String is asymmetric_motion_partitions_enabled_flag instead of amp_enabled_flag in order to match with HM */ BITS_PARSE("asymmetric_motion_partitions_enabled_flag", value, ps_bitstrm, 1); ps_sps->i1_amp_enabled_flag = value; BITS_PARSE("sample_adaptive_offset_enabled_flag", value, ps_bitstrm, 1); ps_sps->i1_sample_adaptive_offset_enabled_flag = value; BITS_PARSE("pcm_enabled_flag", value, ps_bitstrm, 1); ps_sps->i1_pcm_enabled_flag = value; if(ps_sps->i1_pcm_enabled_flag) { BITS_PARSE("pcm_sample_bit_depth_luma", value, ps_bitstrm, 4); ps_sps->i1_pcm_sample_bit_depth_luma = value + 1; BITS_PARSE("pcm_sample_bit_depth_chroma", value, ps_bitstrm, 4); ps_sps->i1_pcm_sample_bit_depth_chroma = value + 1; UEV_PARSE("log2_min_pcm_coding_block_size_minus3", value, ps_bitstrm); ps_sps->i1_log2_min_pcm_coding_block_size = value + 3; UEV_PARSE("log2_diff_max_min_pcm_coding_block_size", value, ps_bitstrm); ps_sps->i1_log2_diff_max_min_pcm_coding_block_size = value; BITS_PARSE("pcm_loop_filter_disable_flag", value, ps_bitstrm, 1); ps_sps->i1_pcm_loop_filter_disable_flag = value; } UEV_PARSE("num_short_term_ref_pic_sets", value, ps_bitstrm); 
ps_sps->i1_num_short_term_ref_pic_sets = value; ps_sps->i1_num_short_term_ref_pic_sets = CLIP3(ps_sps->i1_num_short_term_ref_pic_sets, 0, MAX_STREF_PICS_SPS); for(i = 0; i < ps_sps->i1_num_short_term_ref_pic_sets; i++) ihevcd_short_term_ref_pic_set(ps_bitstrm, &ps_sps->as_stref_picset[0], ps_sps->i1_num_short_term_ref_pic_sets, i, &ps_sps->as_stref_picset[i]); BITS_PARSE("long_term_ref_pics_present_flag", value, ps_bitstrm, 1); ps_sps->i1_long_term_ref_pics_present_flag = value; if(ps_sps->i1_long_term_ref_pics_present_flag) { UEV_PARSE("num_long_term_ref_pics_sps", value, ps_bitstrm); ps_sps->i1_num_long_term_ref_pics_sps = value; for(i = 0; i < ps_sps->i1_num_long_term_ref_pics_sps; i++) { BITS_PARSE("lt_ref_pic_poc_lsb_sps[ i ]", value, ps_bitstrm, ps_sps->i1_log2_max_pic_order_cnt_lsb); ps_sps->ai1_lt_ref_pic_poc_lsb_sps[i] = value; BITS_PARSE("used_by_curr_pic_lt_sps_flag[ i ]", value, ps_bitstrm, 1); ps_sps->ai1_used_by_curr_pic_lt_sps_flag[i] = value; } } BITS_PARSE("sps_temporal_mvp_enable_flag", value, ps_bitstrm, 1); ps_sps->i1_sps_temporal_mvp_enable_flag = value; /* Print matches HM 8-2 */ BITS_PARSE("sps_strong_intra_smoothing_enable_flag", value, ps_bitstrm, 1); ps_sps->i1_strong_intra_smoothing_enable_flag = value; BITS_PARSE("vui_parameters_present_flag", value, ps_bitstrm, 1); ps_sps->i1_vui_parameters_present_flag = value; if(ps_sps->i1_vui_parameters_present_flag) ihevcd_parse_vui_parameters(ps_bitstrm, &ps_sps->s_vui_parameters, ps_sps->i1_sps_max_sub_layers - 1); BITS_PARSE("sps_extension_flag", value, ps_bitstrm, 1); { WORD32 numerator; WORD32 ceil_offset; ceil_offset = (1 << ps_sps->i1_log2_ctb_size) - 1; numerator = ps_sps->i2_pic_width_in_luma_samples; ps_sps->i2_pic_wd_in_ctb = ((numerator + ceil_offset) / (1 << ps_sps->i1_log2_ctb_size)); numerator = ps_sps->i2_pic_height_in_luma_samples; ps_sps->i2_pic_ht_in_ctb = ((numerator + ceil_offset) / (1 << ps_sps->i1_log2_ctb_size)); ps_sps->i4_pic_size_in_ctb = ps_sps->i2_pic_ht_in_ctb * ps_sps->i2_pic_wd_in_ctb; if(0 == ps_codec->i4_sps_done) ps_codec->s_parse.i4_next_ctb_indx = ps_sps->i4_pic_size_in_ctb; numerator = ps_sps->i2_pic_width_in_luma_samples; ps_sps->i2_pic_wd_in_min_cb = numerator / (1 << ps_sps->i1_log2_min_coding_block_size); numerator = ps_sps->i2_pic_height_in_luma_samples; ps_sps->i2_pic_ht_in_min_cb = numerator / (1 << ps_sps->i1_log2_min_coding_block_size); } if((0 != ps_codec->i4_first_pic_done) && ((ps_codec->i4_wd != ps_sps->i2_pic_width_in_luma_samples) || (ps_codec->i4_ht != ps_sps->i2_pic_height_in_luma_samples))) { ps_codec->i4_reset_flag = 1; ps_codec->i4_error_code = IVD_RES_CHANGED; return (IHEVCD_ERROR_T)IHEVCD_FAIL; } /* Update display width and display height */ { WORD32 disp_wd, disp_ht; WORD32 crop_unit_x, crop_unit_y; crop_unit_x = 1; crop_unit_y = 1; if(CHROMA_FMT_IDC_YUV420 == ps_sps->i1_chroma_format_idc) { crop_unit_x = 2; crop_unit_y = 2; } disp_wd = ps_sps->i2_pic_width_in_luma_samples; disp_wd -= ps_sps->i2_pic_crop_left_offset * crop_unit_x; disp_wd -= ps_sps->i2_pic_crop_right_offset * crop_unit_x; disp_ht = ps_sps->i2_pic_height_in_luma_samples; disp_ht -= ps_sps->i2_pic_crop_top_offset * crop_unit_y; disp_ht -= ps_sps->i2_pic_crop_bottom_offset * crop_unit_y; if((0 >= disp_wd) || (0 >= disp_ht)) return IHEVCD_INVALID_PARAMETER; ps_codec->i4_disp_wd = disp_wd; ps_codec->i4_disp_ht = disp_ht; ps_codec->i4_wd = ps_sps->i2_pic_width_in_luma_samples; ps_codec->i4_ht = ps_sps->i2_pic_height_in_luma_samples; { WORD32 ref_strd; ref_strd = 
ALIGN32(ps_sps->i2_pic_width_in_luma_samples + PAD_WD); if(ps_codec->i4_strd < ref_strd) { ps_codec->i4_strd = ref_strd; } } if(0 == ps_codec->i4_share_disp_buf) { if(ps_codec->i4_disp_strd < ps_codec->i4_disp_wd) { ps_codec->i4_disp_strd = ps_codec->i4_disp_wd; } } else { if(ps_codec->i4_disp_strd < ps_codec->i4_strd) { ps_codec->i4_disp_strd = ps_codec->i4_strd; } } } ps_codec->i4_sps_done = 1; return ret; } Commit Message: Handle invalid num_reorder_pics & max_dec_pic_buffering in SPS Bug: 33864300 Change-Id: I920e45c3420a1a41a366ad45bd4186c5f6af6d6b CWE ID: CWE-119
IHEVCD_ERROR_T ihevcd_parse_sps(codec_t *ps_codec) { IHEVCD_ERROR_T ret = (IHEVCD_ERROR_T)IHEVCD_SUCCESS; WORD32 value; WORD32 i; WORD32 vps_id; WORD32 sps_max_sub_layers; WORD32 sps_id; WORD32 sps_temporal_id_nesting_flag; sps_t *ps_sps; profile_tier_lvl_info_t s_ptl; bitstrm_t *ps_bitstrm = &ps_codec->s_parse.s_bitstrm; BITS_PARSE("video_parameter_set_id", value, ps_bitstrm, 4); vps_id = value; vps_id = CLIP3(vps_id, 0, MAX_VPS_CNT - 1); BITS_PARSE("sps_max_sub_layers_minus1", value, ps_bitstrm, 3); sps_max_sub_layers = value + 1; sps_max_sub_layers = CLIP3(sps_max_sub_layers, 1, 7); BITS_PARSE("sps_temporal_id_nesting_flag", value, ps_bitstrm, 1); sps_temporal_id_nesting_flag = value; ret = ihevcd_profile_tier_level(ps_bitstrm, &(s_ptl), 1, (sps_max_sub_layers - 1)); UEV_PARSE("seq_parameter_set_id", value, ps_bitstrm); sps_id = value; if((sps_id >= MAX_SPS_CNT) || (sps_id < 0)) { if(ps_codec->i4_sps_done) return IHEVCD_UNSUPPORTED_SPS_ID; else sps_id = 0; } ps_sps = (ps_codec->s_parse.ps_sps_base + MAX_SPS_CNT - 1); ps_sps->i1_sps_id = sps_id; ps_sps->i1_vps_id = vps_id; ps_sps->i1_sps_max_sub_layers = sps_max_sub_layers; ps_sps->i1_sps_temporal_id_nesting_flag = sps_temporal_id_nesting_flag; /* This is used only during initialization to get reorder count etc */ ps_codec->i4_sps_id = sps_id; memcpy(&ps_sps->s_ptl, &s_ptl, sizeof(profile_tier_lvl_info_t)); UEV_PARSE("chroma_format_idc", value, ps_bitstrm); ps_sps->i1_chroma_format_idc = value; if(ps_sps->i1_chroma_format_idc != CHROMA_FMT_IDC_YUV420) { ps_codec->s_parse.i4_error_code = IHEVCD_UNSUPPORTED_CHROMA_FMT_IDC; return (IHEVCD_ERROR_T)IHEVCD_UNSUPPORTED_CHROMA_FMT_IDC; } if(CHROMA_FMT_IDC_YUV444_PLANES == ps_sps->i1_chroma_format_idc) { BITS_PARSE("separate_colour_plane_flag", value, ps_bitstrm, 1); ps_sps->i1_separate_colour_plane_flag = value; } else { ps_sps->i1_separate_colour_plane_flag = 0; } UEV_PARSE("pic_width_in_luma_samples", value, ps_bitstrm); ps_sps->i2_pic_width_in_luma_samples = value; UEV_PARSE("pic_height_in_luma_samples", value, ps_bitstrm); ps_sps->i2_pic_height_in_luma_samples = value; if((0 >= ps_sps->i2_pic_width_in_luma_samples) || (0 >= ps_sps->i2_pic_height_in_luma_samples)) return IHEVCD_INVALID_PARAMETER; /* i2_pic_width_in_luma_samples and i2_pic_height_in_luma_samples should be multiples of min_cb_size. Here these are aligned to 8, i.e. 
smallest CB size */ ps_sps->i2_pic_width_in_luma_samples = ALIGN8(ps_sps->i2_pic_width_in_luma_samples); ps_sps->i2_pic_height_in_luma_samples = ALIGN8(ps_sps->i2_pic_height_in_luma_samples); if((ps_sps->i2_pic_width_in_luma_samples > ps_codec->i4_max_wd) || (ps_sps->i2_pic_width_in_luma_samples * ps_sps->i2_pic_height_in_luma_samples > ps_codec->i4_max_wd * ps_codec->i4_max_ht) || (ps_sps->i2_pic_height_in_luma_samples > MAX(ps_codec->i4_max_wd, ps_codec->i4_max_ht))) { ps_codec->i4_new_max_wd = ps_sps->i2_pic_width_in_luma_samples; ps_codec->i4_new_max_ht = ps_sps->i2_pic_height_in_luma_samples; return (IHEVCD_ERROR_T)IHEVCD_UNSUPPORTED_DIMENSIONS; } BITS_PARSE("pic_cropping_flag", value, ps_bitstrm, 1); ps_sps->i1_pic_cropping_flag = value; if(ps_sps->i1_pic_cropping_flag) { UEV_PARSE("pic_crop_left_offset", value, ps_bitstrm); ps_sps->i2_pic_crop_left_offset = value; UEV_PARSE("pic_crop_right_offset", value, ps_bitstrm); ps_sps->i2_pic_crop_right_offset = value; UEV_PARSE("pic_crop_top_offset", value, ps_bitstrm); ps_sps->i2_pic_crop_top_offset = value; UEV_PARSE("pic_crop_bottom_offset", value, ps_bitstrm); ps_sps->i2_pic_crop_bottom_offset = value; } else { ps_sps->i2_pic_crop_left_offset = 0; ps_sps->i2_pic_crop_right_offset = 0; ps_sps->i2_pic_crop_top_offset = 0; ps_sps->i2_pic_crop_bottom_offset = 0; } UEV_PARSE("bit_depth_luma_minus8", value, ps_bitstrm); if(0 != value) return IHEVCD_UNSUPPORTED_BIT_DEPTH; UEV_PARSE("bit_depth_chroma_minus8", value, ps_bitstrm); if(0 != value) return IHEVCD_UNSUPPORTED_BIT_DEPTH; UEV_PARSE("log2_max_pic_order_cnt_lsb_minus4", value, ps_bitstrm); ps_sps->i1_log2_max_pic_order_cnt_lsb = value + 4; BITS_PARSE("sps_sub_layer_ordering_info_present_flag", value, ps_bitstrm, 1); ps_sps->i1_sps_sub_layer_ordering_info_present_flag = value; i = (ps_sps->i1_sps_sub_layer_ordering_info_present_flag ? 
0 : (ps_sps->i1_sps_max_sub_layers - 1)); for(; i < ps_sps->i1_sps_max_sub_layers; i++) { UEV_PARSE("max_dec_pic_buffering", value, ps_bitstrm); ps_sps->ai1_sps_max_dec_pic_buffering[i] = value + 1; if(ps_sps->ai1_sps_max_dec_pic_buffering[i] > MAX_DPB_SIZE) { return IHEVCD_INVALID_PARAMETER; } UEV_PARSE("num_reorder_pics", value, ps_bitstrm); ps_sps->ai1_sps_max_num_reorder_pics[i] = value; if(ps_sps->ai1_sps_max_num_reorder_pics[i] > ps_sps->ai1_sps_max_dec_pic_buffering[i]) { return IHEVCD_INVALID_PARAMETER; } UEV_PARSE("max_latency_increase", value, ps_bitstrm); ps_sps->ai1_sps_max_latency_increase[i] = value; } UEV_PARSE("log2_min_coding_block_size_minus3", value, ps_bitstrm); ps_sps->i1_log2_min_coding_block_size = value + 3; UEV_PARSE("log2_diff_max_min_coding_block_size", value, ps_bitstrm); ps_sps->i1_log2_diff_max_min_coding_block_size = value; UEV_PARSE("log2_min_transform_block_size_minus2", value, ps_bitstrm); ps_sps->i1_log2_min_transform_block_size = value + 2; UEV_PARSE("log2_diff_max_min_transform_block_size", value, ps_bitstrm); ps_sps->i1_log2_diff_max_min_transform_block_size = value; ps_sps->i1_log2_max_transform_block_size = ps_sps->i1_log2_min_transform_block_size + ps_sps->i1_log2_diff_max_min_transform_block_size; ps_sps->i1_log2_ctb_size = ps_sps->i1_log2_min_coding_block_size + ps_sps->i1_log2_diff_max_min_coding_block_size; if((ps_sps->i1_log2_min_coding_block_size < 3) || (ps_sps->i1_log2_min_transform_block_size < 2) || (ps_sps->i1_log2_diff_max_min_transform_block_size < 0) || (ps_sps->i1_log2_max_transform_block_size > ps_sps->i1_log2_ctb_size) || (ps_sps->i1_log2_ctb_size < 4) || (ps_sps->i1_log2_ctb_size > 6)) { return IHEVCD_INVALID_PARAMETER; } ps_sps->i1_log2_min_pcm_coding_block_size = 0; ps_sps->i1_log2_diff_max_min_pcm_coding_block_size = 0; UEV_PARSE("max_transform_hierarchy_depth_inter", value, ps_bitstrm); ps_sps->i1_max_transform_hierarchy_depth_inter = value; UEV_PARSE("max_transform_hierarchy_depth_intra", value, ps_bitstrm); ps_sps->i1_max_transform_hierarchy_depth_intra = value; /* String has a d (enabled) in order to match with HM */ BITS_PARSE("scaling_list_enabled_flag", value, ps_bitstrm, 1); ps_sps->i1_scaling_list_enable_flag = value; if(ps_sps->i1_scaling_list_enable_flag) { COPY_DEFAULT_SCALING_LIST(ps_sps->pi2_scaling_mat); BITS_PARSE("sps_scaling_list_data_present_flag", value, ps_bitstrm, 1); ps_sps->i1_sps_scaling_list_data_present_flag = value; if(ps_sps->i1_sps_scaling_list_data_present_flag) ihevcd_scaling_list_data(ps_codec, ps_sps->pi2_scaling_mat); } else { COPY_FLAT_SCALING_LIST(ps_sps->pi2_scaling_mat); } /* String is asymmetric_motion_partitions_enabled_flag instead of amp_enabled_flag in order to match with HM */ BITS_PARSE("asymmetric_motion_partitions_enabled_flag", value, ps_bitstrm, 1); ps_sps->i1_amp_enabled_flag = value; BITS_PARSE("sample_adaptive_offset_enabled_flag", value, ps_bitstrm, 1); ps_sps->i1_sample_adaptive_offset_enabled_flag = value; BITS_PARSE("pcm_enabled_flag", value, ps_bitstrm, 1); ps_sps->i1_pcm_enabled_flag = value; if(ps_sps->i1_pcm_enabled_flag) { BITS_PARSE("pcm_sample_bit_depth_luma", value, ps_bitstrm, 4); ps_sps->i1_pcm_sample_bit_depth_luma = value + 1; BITS_PARSE("pcm_sample_bit_depth_chroma", value, ps_bitstrm, 4); ps_sps->i1_pcm_sample_bit_depth_chroma = value + 1; UEV_PARSE("log2_min_pcm_coding_block_size_minus3", value, ps_bitstrm); ps_sps->i1_log2_min_pcm_coding_block_size = value + 3; UEV_PARSE("log2_diff_max_min_pcm_coding_block_size", value, ps_bitstrm); 
ps_sps->i1_log2_diff_max_min_pcm_coding_block_size = value; BITS_PARSE("pcm_loop_filter_disable_flag", value, ps_bitstrm, 1); ps_sps->i1_pcm_loop_filter_disable_flag = value; } UEV_PARSE("num_short_term_ref_pic_sets", value, ps_bitstrm); ps_sps->i1_num_short_term_ref_pic_sets = value; ps_sps->i1_num_short_term_ref_pic_sets = CLIP3(ps_sps->i1_num_short_term_ref_pic_sets, 0, MAX_STREF_PICS_SPS); for(i = 0; i < ps_sps->i1_num_short_term_ref_pic_sets; i++) ihevcd_short_term_ref_pic_set(ps_bitstrm, &ps_sps->as_stref_picset[0], ps_sps->i1_num_short_term_ref_pic_sets, i, &ps_sps->as_stref_picset[i]); BITS_PARSE("long_term_ref_pics_present_flag", value, ps_bitstrm, 1); ps_sps->i1_long_term_ref_pics_present_flag = value; if(ps_sps->i1_long_term_ref_pics_present_flag) { UEV_PARSE("num_long_term_ref_pics_sps", value, ps_bitstrm); ps_sps->i1_num_long_term_ref_pics_sps = value; for(i = 0; i < ps_sps->i1_num_long_term_ref_pics_sps; i++) { BITS_PARSE("lt_ref_pic_poc_lsb_sps[ i ]", value, ps_bitstrm, ps_sps->i1_log2_max_pic_order_cnt_lsb); ps_sps->ai1_lt_ref_pic_poc_lsb_sps[i] = value; BITS_PARSE("used_by_curr_pic_lt_sps_flag[ i ]", value, ps_bitstrm, 1); ps_sps->ai1_used_by_curr_pic_lt_sps_flag[i] = value; } } BITS_PARSE("sps_temporal_mvp_enable_flag", value, ps_bitstrm, 1); ps_sps->i1_sps_temporal_mvp_enable_flag = value; /* Print matches HM 8-2 */ BITS_PARSE("sps_strong_intra_smoothing_enable_flag", value, ps_bitstrm, 1); ps_sps->i1_strong_intra_smoothing_enable_flag = value; BITS_PARSE("vui_parameters_present_flag", value, ps_bitstrm, 1); ps_sps->i1_vui_parameters_present_flag = value; if(ps_sps->i1_vui_parameters_present_flag) ihevcd_parse_vui_parameters(ps_bitstrm, &ps_sps->s_vui_parameters, ps_sps->i1_sps_max_sub_layers - 1); BITS_PARSE("sps_extension_flag", value, ps_bitstrm, 1); { WORD32 numerator; WORD32 ceil_offset; ceil_offset = (1 << ps_sps->i1_log2_ctb_size) - 1; numerator = ps_sps->i2_pic_width_in_luma_samples; ps_sps->i2_pic_wd_in_ctb = ((numerator + ceil_offset) / (1 << ps_sps->i1_log2_ctb_size)); numerator = ps_sps->i2_pic_height_in_luma_samples; ps_sps->i2_pic_ht_in_ctb = ((numerator + ceil_offset) / (1 << ps_sps->i1_log2_ctb_size)); ps_sps->i4_pic_size_in_ctb = ps_sps->i2_pic_ht_in_ctb * ps_sps->i2_pic_wd_in_ctb; if(0 == ps_codec->i4_sps_done) ps_codec->s_parse.i4_next_ctb_indx = ps_sps->i4_pic_size_in_ctb; numerator = ps_sps->i2_pic_width_in_luma_samples; ps_sps->i2_pic_wd_in_min_cb = numerator / (1 << ps_sps->i1_log2_min_coding_block_size); numerator = ps_sps->i2_pic_height_in_luma_samples; ps_sps->i2_pic_ht_in_min_cb = numerator / (1 << ps_sps->i1_log2_min_coding_block_size); } if((0 != ps_codec->i4_first_pic_done) && ((ps_codec->i4_wd != ps_sps->i2_pic_width_in_luma_samples) || (ps_codec->i4_ht != ps_sps->i2_pic_height_in_luma_samples))) { ps_codec->i4_reset_flag = 1; ps_codec->i4_error_code = IVD_RES_CHANGED; return (IHEVCD_ERROR_T)IHEVCD_FAIL; } /* Update display width and display height */ { WORD32 disp_wd, disp_ht; WORD32 crop_unit_x, crop_unit_y; crop_unit_x = 1; crop_unit_y = 1; if(CHROMA_FMT_IDC_YUV420 == ps_sps->i1_chroma_format_idc) { crop_unit_x = 2; crop_unit_y = 2; } disp_wd = ps_sps->i2_pic_width_in_luma_samples; disp_wd -= ps_sps->i2_pic_crop_left_offset * crop_unit_x; disp_wd -= ps_sps->i2_pic_crop_right_offset * crop_unit_x; disp_ht = ps_sps->i2_pic_height_in_luma_samples; disp_ht -= ps_sps->i2_pic_crop_top_offset * crop_unit_y; disp_ht -= ps_sps->i2_pic_crop_bottom_offset * crop_unit_y; if((0 >= disp_wd) || (0 >= disp_ht)) return IHEVCD_INVALID_PARAMETER; 
ps_codec->i4_disp_wd = disp_wd; ps_codec->i4_disp_ht = disp_ht; ps_codec->i4_wd = ps_sps->i2_pic_width_in_luma_samples; ps_codec->i4_ht = ps_sps->i2_pic_height_in_luma_samples; { WORD32 ref_strd; ref_strd = ALIGN32(ps_sps->i2_pic_width_in_luma_samples + PAD_WD); if(ps_codec->i4_strd < ref_strd) { ps_codec->i4_strd = ref_strd; } } if(0 == ps_codec->i4_share_disp_buf) { if(ps_codec->i4_disp_strd < ps_codec->i4_disp_wd) { ps_codec->i4_disp_strd = ps_codec->i4_disp_wd; } } else { if(ps_codec->i4_disp_strd < ps_codec->i4_strd) { ps_codec->i4_disp_strd = ps_codec->i4_strd; } } } ps_codec->i4_sps_done = 1; return ret; }
174,053
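The HEVC fix above caps the stream-supplied DPB parameters before they are used as array bounds: max_dec_pic_buffering may not exceed the decoder's MAX_DPB_SIZE, and num_reorder_pics may not exceed max_dec_pic_buffering. A minimal standalone sketch of that validation pattern follows; the MAX_DPB_SIZE value of 16 and the name validate_dpb_params are assumptions for illustration, not taken from the decoder source.

#include <stdio.h>

#define MAX_DPB_SIZE 16   /* assumed cap for the sketch; real decoders define their own */

/* Reject stream-supplied DPB parameters before they index fixed-size arrays.
 * Returns 0 if the pair is usable, -1 otherwise. */
static int validate_dpb_params(unsigned max_dec_pic_buffering,
                               unsigned max_num_reorder_pics)
{
    if (max_dec_pic_buffering > MAX_DPB_SIZE)
        return -1;                               /* would overrun fixed-size DPB arrays */
    if (max_num_reorder_pics > max_dec_pic_buffering)
        return -1;                               /* reorder depth cannot exceed the DPB */
    return 0;
}

int main(void)
{
    printf("%d\n", validate_dpb_params(6, 4));   /* 0: accepted  */
    printf("%d\n", validate_dpb_params(40, 4));  /* -1: rejected */
    return 0;
}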
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ServiceWorkerContainer::registerServiceWorkerImpl(ExecutionContext* executionContext, const KURL& rawScriptURL, const KURL& scope, PassOwnPtr<RegistrationCallbacks> callbacks) { if (!m_provider) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeState, "Failed to register a ServiceWorker: The document is in an invalid state.")); return; } RefPtr<SecurityOrigin> documentOrigin = executionContext->getSecurityOrigin(); String errorMessage; if (!executionContext->isSecureContext(errorMessage)) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, errorMessage)); return; } KURL pageURL = KURL(KURL(), documentOrigin->toString()); if (!SchemeRegistry::shouldTreatURLSchemeAsAllowingServiceWorkers(pageURL.protocol())) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, String("Failed to register a ServiceWorker: The URL protocol of the current origin ('" + documentOrigin->toString() + "') is not supported."))); return; } KURL scriptURL = rawScriptURL; scriptURL.removeFragmentIdentifier(); if (!documentOrigin->canRequest(scriptURL)) { RefPtr<SecurityOrigin> scriptOrigin = SecurityOrigin::create(scriptURL); callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, String("Failed to register a ServiceWorker: The origin of the provided scriptURL ('" + scriptOrigin->toString() + "') does not match the current origin ('" + documentOrigin->toString() + "')."))); return; } if (!SchemeRegistry::shouldTreatURLSchemeAsAllowingServiceWorkers(scriptURL.protocol())) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, String("Failed to register a ServiceWorker: The URL protocol of the script ('" + scriptURL.getString() + "') is not supported."))); return; } KURL patternURL = scope; patternURL.removeFragmentIdentifier(); if (!documentOrigin->canRequest(patternURL)) { RefPtr<SecurityOrigin> patternOrigin = SecurityOrigin::create(patternURL); callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, String("Failed to register a ServiceWorker: The origin of the provided scope ('" + patternOrigin->toString() + "') does not match the current origin ('" + documentOrigin->toString() + "')."))); return; } if (!SchemeRegistry::shouldTreatURLSchemeAsAllowingServiceWorkers(patternURL.protocol())) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, String("Failed to register a ServiceWorker: The URL protocol of the scope ('" + patternURL.getString() + "') is not supported."))); return; } WebString webErrorMessage; if (!m_provider->validateScopeAndScriptURL(patternURL, scriptURL, &webErrorMessage)) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeType, WebString::fromUTF8("Failed to register a ServiceWorker: " + webErrorMessage.utf8()))); return; } m_provider->registerServiceWorker(patternURL, scriptURL, callbacks.leakPtr()); } Commit Message: Check CSP before registering ServiceWorkers Service Worker registrations should be subject to the same CSP checks as other workers. The spec doesn't say this explicitly (https://www.w3.org/TR/CSP2/#directive-child-src-workers says "Worker or SharedWorker constructors"), but it seems to be in the spirit of things, and it matches Firefox's behavior. BUG=579801 Review URL: https://codereview.chromium.org/1861253004 Cr-Commit-Position: refs/heads/master@{#385775} CWE ID: CWE-284
void ServiceWorkerContainer::registerServiceWorkerImpl(ExecutionContext* executionContext, const KURL& rawScriptURL, const KURL& scope, PassOwnPtr<RegistrationCallbacks> callbacks) { if (!m_provider) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeState, "Failed to register a ServiceWorker: The document is in an invalid state.")); return; } RefPtr<SecurityOrigin> documentOrigin = executionContext->getSecurityOrigin(); String errorMessage; if (!executionContext->isSecureContext(errorMessage)) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, errorMessage)); return; } KURL pageURL = KURL(KURL(), documentOrigin->toString()); if (!SchemeRegistry::shouldTreatURLSchemeAsAllowingServiceWorkers(pageURL.protocol())) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, String("Failed to register a ServiceWorker: The URL protocol of the current origin ('" + documentOrigin->toString() + "') is not supported."))); return; } KURL scriptURL = rawScriptURL; scriptURL.removeFragmentIdentifier(); if (!documentOrigin->canRequest(scriptURL)) { RefPtr<SecurityOrigin> scriptOrigin = SecurityOrigin::create(scriptURL); callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, String("Failed to register a ServiceWorker: The origin of the provided scriptURL ('" + scriptOrigin->toString() + "') does not match the current origin ('" + documentOrigin->toString() + "')."))); return; } if (!SchemeRegistry::shouldTreatURLSchemeAsAllowingServiceWorkers(scriptURL.protocol())) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, String("Failed to register a ServiceWorker: The URL protocol of the script ('" + scriptURL.getString() + "') is not supported."))); return; } KURL patternURL = scope; patternURL.removeFragmentIdentifier(); if (!documentOrigin->canRequest(patternURL)) { RefPtr<SecurityOrigin> patternOrigin = SecurityOrigin::create(patternURL); callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, String("Failed to register a ServiceWorker: The origin of the provided scope ('" + patternOrigin->toString() + "') does not match the current origin ('" + documentOrigin->toString() + "')."))); return; } if (!SchemeRegistry::shouldTreatURLSchemeAsAllowingServiceWorkers(patternURL.protocol())) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, String("Failed to register a ServiceWorker: The URL protocol of the scope ('" + patternURL.getString() + "') is not supported."))); return; } WebString webErrorMessage; if (!m_provider->validateScopeAndScriptURL(patternURL, scriptURL, &webErrorMessage)) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeType, WebString::fromUTF8("Failed to register a ServiceWorker: " + webErrorMessage.utf8()))); return; } ContentSecurityPolicy* csp = executionContext->contentSecurityPolicy(); if (csp) { if (!csp->allowWorkerContextFromSource(scriptURL, ContentSecurityPolicy::DidNotRedirect, ContentSecurityPolicy::SendReport)) { callbacks->onError(WebServiceWorkerError(WebServiceWorkerError::ErrorTypeSecurity, String("Failed to register a ServiceWorker: The provided scriptURL ('" + scriptURL.getString() + "') violates the Content Security Policy."))); return; } } m_provider->registerServiceWorker(patternURL, scriptURL, callbacks.leakPtr()); }
173,285
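The ServiceWorker fix inserts a Content Security Policy check between the URL validations and the registration call. The sketch below shows the same guard-clause ordering in plain C with an invented, same-origin-only policy; policy_allows_worker and register_worker are made-up names for the example, not Blink or CSP APIs.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in policy: allow only scripts served from the registering origin. */
static bool policy_allows_worker(const char *origin, const char *script_url)
{
    return strncmp(script_url, origin, strlen(origin)) == 0;
}

/* Every check, including the policy one, runs before the side-effecting
 * registration step, mirroring the ordering of the patched function. */
static int register_worker(const char *origin, const char *script_url)
{
    if (!policy_allows_worker(origin, script_url)) {
        fprintf(stderr, "refused by policy: %s\n", script_url);
        return -1;
    }
    printf("registering %s\n", script_url);
    return 0;
}

int main(void)
{
    register_worker("https://example.test/", "https://example.test/sw.js");
    register_worker("https://example.test/", "https://attacker.test/sw.js");
    return 0;
}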
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int UDPSocketWin::DoBind(const IPEndPoint& address) { SockaddrStorage storage; if (!address.ToSockAddr(storage.addr, &storage.addr_len)) return ERR_ADDRESS_INVALID; int rv = bind(socket_, storage.addr, storage.addr_len); if (rv == 0) return OK; int last_error = WSAGetLastError(); UMA_HISTOGRAM_SPARSE_SLOWLY("Net.UdpSocketBindErrorFromWinOS", last_error); if (last_error == WSAEACCES || last_error == WSAEINVAL) return ERR_ADDRESS_IN_USE; return MapSystemError(last_error); } Commit Message: Map posix error codes in bind better, and fix one windows mapping. r=wtc BUG=330233 Review URL: https://codereview.chromium.org/101193008 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@242224 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-416
int UDPSocketWin::DoBind(const IPEndPoint& address) { SockaddrStorage storage; if (!address.ToSockAddr(storage.addr, &storage.addr_len)) return ERR_ADDRESS_INVALID; int rv = bind(socket_, storage.addr, storage.addr_len); if (rv == 0) return OK; int last_error = WSAGetLastError(); UMA_HISTOGRAM_SPARSE_SLOWLY("Net.UdpSocketBindErrorFromWinOS", last_error); // * WSAEACCES: If a port is already bound to a socket, WSAEACCES may be // returned instead of WSAEADDRINUSE, depending on whether the socket // option SO_REUSEADDR or SO_EXCLUSIVEADDRUSE is set and whether the // conflicting socket is owned by a different user account. See the MSDN // page "Using SO_REUSEADDR and SO_EXCLUSIVEADDRUSE" for the gory details. if (last_error == WSAEACCES || last_error == WSAEADDRNOTAVAIL) return ERR_ADDRESS_IN_USE; return MapSystemError(last_error); }
171,317
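The bind fix adjusts which OS-specific error codes collapse onto the portable address-in-use error. A small sketch of that mapping pattern on POSIX errno values follows; the NET_* names and their numeric values are arbitrary for the example and do not match Chromium's net error constants.

#include <errno.h>
#include <stdio.h>

/* Arbitrary portable codes for the sketch. */
enum net_error {
    NET_OK                  = 0,
    NET_ERR_ADDRESS_IN_USE  = -1,
    NET_ERR_ADDRESS_INVALID = -2,
    NET_ERR_FAILED          = -3
};

/* Collapse the OS-specific errno from bind() onto a small portable set. */
static enum net_error map_bind_error(int err)
{
    switch (err) {
    case 0:             return NET_OK;
    case EADDRINUSE:
    case EACCES:        return NET_ERR_ADDRESS_IN_USE;   /* port already taken  */
    case EADDRNOTAVAIL: return NET_ERR_ADDRESS_INVALID;  /* not a local address */
    default:            return NET_ERR_FAILED;
    }
}

int main(void)
{
    printf("%d\n", map_bind_error(EADDRINUSE));   /* -1 */
    printf("%d\n", map_bind_error(EPERM));        /* -3 */
    return 0;
}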
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Image *ReadARTImage(const ImageInfo *image_info,ExceptionInfo *exception) { const unsigned char *pixels; Image *image; QuantumInfo *quantum_info; QuantumType quantum_type; MagickBooleanType status; size_t length; ssize_t count, y; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } image->depth=1; image->endian=MSBEndian; (void) ReadBlobLSBShort(image); image->columns=(size_t) ReadBlobLSBShort(image); (void) ReadBlobLSBShort(image); image->rows=(size_t) ReadBlobLSBShort(image); if ((image->columns == 0) || (image->rows == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image colormap. */ if (AcquireImageColormap(image,2) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Convert bi-level image to pixel packets. */ SetImageColorspace(image,GRAYColorspace); quantum_type=IndexQuantum; quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); length=GetQuantumExtent(image,quantum_info,quantum_type); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; pixels=(const unsigned char *) ReadBlobStream(image,length, GetQuantumPixels(quantum_info),&count); if (count != (ssize_t) length) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); (void) ReadBlobStream(image,(size_t) (-(ssize_t) length) & 0x01, GetQuantumPixels(quantum_info),&count); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } SetQuantumImageType(image,quantum_type); quantum_info=DestroyQuantumInfo(quantum_info); if (EOFBlob(image) != MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); return(GetFirstImageInList(image)); } Commit Message: Fixed memory leak reported in #456. CWE ID: CWE-772
static Image *ReadARTImage(const ImageInfo *image_info,ExceptionInfo *exception) { const unsigned char *pixels; Image *image; QuantumInfo *quantum_info; QuantumType quantum_type; MagickBooleanType status; size_t length; ssize_t count, y; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } image->depth=1; image->endian=MSBEndian; (void) ReadBlobLSBShort(image); image->columns=(size_t) ReadBlobLSBShort(image); (void) ReadBlobLSBShort(image); image->rows=(size_t) ReadBlobLSBShort(image); if ((image->columns == 0) || (image->rows == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image colormap. */ if (AcquireImageColormap(image,2) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Convert bi-level image to pixel packets. */ SetImageColorspace(image,GRAYColorspace); quantum_type=IndexQuantum; quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); length=GetQuantumExtent(image,quantum_info,quantum_type); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; pixels=(const unsigned char *) ReadBlobStream(image,length, GetQuantumPixels(quantum_info),&count); if (count != (ssize_t) length) { quantum_info=DestroyQuantumInfo(quantum_info); ThrowReaderException(CorruptImageError,"UnableToReadImageData"); } (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); (void) ReadBlobStream(image,(size_t) (-(ssize_t) length) & 0x01, GetQuantumPixels(quantum_info),&count); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } SetQuantumImageType(image,quantum_type); quantum_info=DestroyQuantumInfo(quantum_info); if (EOFBlob(image) != MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
168,123
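The ReadARTImage fix frees quantum_info before throwing on a short read, an instance of the general rule that every error path must release whatever was acquired before it. A compact way to keep that property in C is a single cleanup exit, sketched below with invented names (load_rows is not an ImageMagick function).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read a row-oriented resource; every failure path goes through one exit,
 * so nothing acquired above it can leak. */
static int load_rows(const char *path, size_t row_bytes, size_t rows)
{
    int status = -1;
    unsigned char *row = NULL;
    FILE *f = fopen(path, "rb");
    if (f == NULL)
        goto cleanup;

    row = malloc(row_bytes);
    if (row == NULL)
        goto cleanup;

    for (size_t y = 0; y < rows; y++)
        if (fread(row, 1, row_bytes, f) != row_bytes)
            goto cleanup;              /* short read: buffer still freed below */

    status = 0;
cleanup:
    free(row);                         /* free(NULL) is a no-op */
    if (f != NULL)
        fclose(f);
    return status;
}

int main(void)
{
    printf("%d\n", load_rows("/dev/null", 16, 1));  /* -1 on a short read, resources still freed */
    return 0;
}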
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: transform_enable(PNG_CONST char *name) { /* Everything starts out enabled, so if we see an 'enable' disabled * everything else the first time round. */ static int all_disabled = 0; int found_it = 0; image_transform *list = image_transform_first; while (list != &image_transform_end) { if (strcmp(list->name, name) == 0) { list->enable = 1; found_it = 1; } else if (!all_disabled) list->enable = 0; list = list->list; } all_disabled = 1; if (!found_it) { fprintf(stderr, "pngvalid: --transform-enable=%s: unknown transform\n", name); exit(99); } } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
transform_enable(PNG_CONST char *name) image_transform_png_set_invert_alpha_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { if (that->colour_type & 4) that->alpha_inverted = 1; this->next->mod(this->next, that, pp, display); } static int image_transform_png_set_invert_alpha_add(image_transform *this, const image_transform **that, png_byte colour_type, png_byte bit_depth) { UNUSED(bit_depth) this->next = *that; *that = this; /* Only has an effect on pixels with alpha: */ return (colour_type & 4) != 0; } IT(invert_alpha); #undef PT #define PT ITSTRUCT(invert_alpha) #endif /* PNG_READ_INVERT_ALPHA_SUPPORTED */ /* png_set_bgr */ #ifdef PNG_READ_BGR_SUPPORTED /* Swap R,G,B channels to order B,G,R. * * png_set_bgr(png_structrp png_ptr) * * This only has an effect on RGB and RGBA pixels. */ static void image_transform_png_set_bgr_set(const image_transform *this, transform_display *that, png_structp pp, png_infop pi) { png_set_bgr(pp); this->next->set(this->next, that, pp, pi); } static void image_transform_png_set_bgr_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { if (that->colour_type == PNG_COLOR_TYPE_RGB || that->colour_type == PNG_COLOR_TYPE_RGBA) that->swap_rgb = 1; this->next->mod(this->next, that, pp, display); } static int image_transform_png_set_bgr_add(image_transform *this, const image_transform **that, png_byte colour_type, png_byte bit_depth) { UNUSED(bit_depth) this->next = *that; *that = this; return colour_type == PNG_COLOR_TYPE_RGB || colour_type == PNG_COLOR_TYPE_RGBA; } IT(bgr); #undef PT #define PT ITSTRUCT(bgr) #endif /* PNG_READ_BGR_SUPPORTED */ /* png_set_swap_alpha */ #ifdef PNG_READ_SWAP_ALPHA_SUPPORTED /* Put the alpha channel first. * * png_set_swap_alpha(png_structrp png_ptr) * * This only has an effect on GA and RGBA pixels. */ static void image_transform_png_set_swap_alpha_set(const image_transform *this, transform_display *that, png_structp pp, png_infop pi) { png_set_swap_alpha(pp); this->next->set(this->next, that, pp, pi); } static void image_transform_png_set_swap_alpha_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { if (that->colour_type == PNG_COLOR_TYPE_GA || that->colour_type == PNG_COLOR_TYPE_RGBA) that->alpha_first = 1; this->next->mod(this->next, that, pp, display); } static int image_transform_png_set_swap_alpha_add(image_transform *this, const image_transform **that, png_byte colour_type, png_byte bit_depth) { UNUSED(bit_depth) this->next = *that; *that = this; return colour_type == PNG_COLOR_TYPE_GA || colour_type == PNG_COLOR_TYPE_RGBA; } IT(swap_alpha); #undef PT #define PT ITSTRUCT(swap_alpha) #endif /* PNG_READ_SWAP_ALPHA_SUPPORTED */ /* png_set_swap */ #ifdef PNG_READ_SWAP_SUPPORTED /* Byte swap 16-bit components. 
* * png_set_swap(png_structrp png_ptr) */ static void image_transform_png_set_swap_set(const image_transform *this, transform_display *that, png_structp pp, png_infop pi) { png_set_swap(pp); this->next->set(this->next, that, pp, pi); } static void image_transform_png_set_swap_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { if (that->bit_depth == 16) that->swap16 = 1; this->next->mod(this->next, that, pp, display); } static int image_transform_png_set_swap_add(image_transform *this, const image_transform **that, png_byte colour_type, png_byte bit_depth) { UNUSED(colour_type) this->next = *that; *that = this; return bit_depth == 16; } IT(swap); #undef PT #define PT ITSTRUCT(swap) #endif /* PNG_READ_SWAP_SUPPORTED */ #ifdef PNG_READ_FILLER_SUPPORTED /* Add a filler byte to 8-bit Gray or 24-bit RGB images. * * png_set_filler, (png_structp png_ptr, png_uint_32 filler, int flags)); * * Flags: * * PNG_FILLER_BEFORE * PNG_FILLER_AFTER */ #define data ITDATA(filler) static struct { png_uint_32 filler; int flags; } data; static void image_transform_png_set_filler_set(const image_transform *this, transform_display *that, png_structp pp, png_infop pi) { /* Need a random choice for 'before' and 'after' as well as for the * filler. The 'filler' value has all 32 bits set, but only bit_depth * will be used. At this point we don't know bit_depth. */ RANDOMIZE(data.filler); data.flags = random_choice(); png_set_filler(pp, data.filler, data.flags); /* The standard display handling stuff also needs to know that * there is a filler, so set that here. */ that->this.filler = 1; this->next->set(this->next, that, pp, pi); } static void image_transform_png_set_filler_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { if (that->bit_depth >= 8 && (that->colour_type == PNG_COLOR_TYPE_RGB || that->colour_type == PNG_COLOR_TYPE_GRAY)) { const unsigned int max = (1U << that->bit_depth)-1; that->alpha = data.filler & max; that->alphaf = ((double)that->alpha) / max; that->alphae = 0; /* The filler has been stored in the alpha channel, we must record * that this has been done for the checking later on, the color * type is faked to have an alpha channel, but libpng won't report * this; the app has to know the extra channel is there and this * was recording in standard_display::filler above. */ that->colour_type |= 4; /* alpha added */ that->alpha_first = data.flags == PNG_FILLER_BEFORE; } this->next->mod(this->next, that, pp, display); } static int image_transform_png_set_filler_add(image_transform *this, const image_transform **that, png_byte colour_type, png_byte bit_depth) { this->next = *that; *that = this; return bit_depth >= 8 && (colour_type == PNG_COLOR_TYPE_RGB || colour_type == PNG_COLOR_TYPE_GRAY); } #undef data IT(filler); #undef PT #define PT ITSTRUCT(filler) /* png_set_add_alpha, (png_structp png_ptr, png_uint_32 filler, int flags)); */ /* Add an alpha byte to 8-bit Gray or 24-bit RGB images. */ #define data ITDATA(add_alpha) static struct { png_uint_32 filler; int flags; } data; static void image_transform_png_set_add_alpha_set(const image_transform *this, transform_display *that, png_structp pp, png_infop pi) { /* Need a random choice for 'before' and 'after' as well as for the * filler. The 'filler' value has all 32 bits set, but only bit_depth * will be used. At this point we don't know bit_depth. 
*/ RANDOMIZE(data.filler); data.flags = random_choice(); png_set_add_alpha(pp, data.filler, data.flags); this->next->set(this->next, that, pp, pi); } static void image_transform_png_set_add_alpha_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { if (that->bit_depth >= 8 && (that->colour_type == PNG_COLOR_TYPE_RGB || that->colour_type == PNG_COLOR_TYPE_GRAY)) { const unsigned int max = (1U << that->bit_depth)-1; that->alpha = data.filler & max; that->alphaf = ((double)that->alpha) / max; that->alphae = 0; that->colour_type |= 4; /* alpha added */ that->alpha_first = data.flags == PNG_FILLER_BEFORE; } this->next->mod(this->next, that, pp, display); } static int image_transform_png_set_add_alpha_add(image_transform *this, const image_transform **that, png_byte colour_type, png_byte bit_depth) { this->next = *that; *that = this; return bit_depth >= 8 && (colour_type == PNG_COLOR_TYPE_RGB || colour_type == PNG_COLOR_TYPE_GRAY); } #undef data IT(add_alpha); #undef PT #define PT ITSTRUCT(add_alpha) #endif /* PNG_READ_FILLER_SUPPORTED */ /* png_set_packing */ #ifdef PNG_READ_PACK_SUPPORTED /* Use 1 byte per pixel in 1, 2, or 4-bit depth files. * * png_set_packing(png_structrp png_ptr) * * This should only affect grayscale and palette images with less than 8 bits * per pixel. */ static void image_transform_png_set_packing_set(const image_transform *this, transform_display *that, png_structp pp, png_infop pi) { png_set_packing(pp); that->unpacked = 1; this->next->set(this->next, that, pp, pi); } static void image_transform_png_set_packing_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { /* The general expand case depends on what the colour type is, * low bit-depth pixel values are unpacked into bytes without * scaling, so sample_depth is not changed. */ if (that->bit_depth < 8) /* grayscale or palette */ that->bit_depth = 8; this->next->mod(this->next, that, pp, display); } static int image_transform_png_set_packing_add(image_transform *this, const image_transform **that, png_byte colour_type, png_byte bit_depth) { UNUSED(colour_type) this->next = *that; *that = this; /* Nothing should happen unless the bit depth is less than 8: */ return bit_depth < 8; } IT(packing); #undef PT #define PT ITSTRUCT(packing) #endif /* PNG_READ_PACK_SUPPORTED */ /* png_set_packswap */ #ifdef PNG_READ_PACKSWAP_SUPPORTED /* Swap pixels packed into bytes; reverses the order on screen so that * the high order bits correspond to the rightmost pixels. 
* * png_set_packswap(png_structrp png_ptr) */ static void image_transform_png_set_packswap_set(const image_transform *this, transform_display *that, png_structp pp, png_infop pi) { png_set_packswap(pp); that->this.littleendian = 1; this->next->set(this->next, that, pp, pi); } static void image_transform_png_set_packswap_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { if (that->bit_depth < 8) that->littleendian = 1; this->next->mod(this->next, that, pp, display); } static int image_transform_png_set_packswap_add(image_transform *this, const image_transform **that, png_byte colour_type, png_byte bit_depth) { UNUSED(colour_type) this->next = *that; *that = this; return bit_depth < 8; } IT(packswap); #undef PT #define PT ITSTRUCT(packswap) #endif /* PNG_READ_PACKSWAP_SUPPORTED */ /* png_set_invert_mono */ #ifdef PNG_READ_INVERT_MONO_SUPPORTED /* Invert the gray channel * * png_set_invert_mono(png_structrp png_ptr) */ static void image_transform_png_set_invert_mono_set(const image_transform *this, transform_display *that, png_structp pp, png_infop pi) { png_set_invert_mono(pp); this->next->set(this->next, that, pp, pi); } static void image_transform_png_set_invert_mono_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { if (that->colour_type & 4) that->mono_inverted = 1; this->next->mod(this->next, that, pp, display); } static int image_transform_png_set_invert_mono_add(image_transform *this, const image_transform **that, png_byte colour_type, png_byte bit_depth) { UNUSED(bit_depth) this->next = *that; *that = this; /* Only has an effect on pixels with no colour: */ return (colour_type & 2) == 0; } IT(invert_mono); #undef PT #define PT ITSTRUCT(invert_mono) #endif /* PNG_READ_INVERT_MONO_SUPPORTED */ #ifdef PNG_READ_SHIFT_SUPPORTED /* png_set_shift(png_structp, png_const_color_8p true_bits) * * The output pixels will be shifted by the given true_bits * values. */ #define data ITDATA(shift) static png_color_8 data; static void image_transform_png_set_shift_set(const image_transform *this, transform_display *that, png_structp pp, png_infop pi) { /* Get a random set of shifts. The shifts need to do something * to test the transform, so they are limited to the bit depth * of the input image. Notice that in the following the 'gray' * field is randomized independently. This acts as a check that * libpng does use the correct field. */ const unsigned int depth = that->this.bit_depth; data.red = (png_byte)/*SAFE*/(random_mod(depth)+1); data.green = (png_byte)/*SAFE*/(random_mod(depth)+1); data.blue = (png_byte)/*SAFE*/(random_mod(depth)+1); data.gray = (png_byte)/*SAFE*/(random_mod(depth)+1); data.alpha = (png_byte)/*SAFE*/(random_mod(depth)+1); png_set_shift(pp, &data); this->next->set(this->next, that, pp, pi); } static void image_transform_png_set_shift_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { /* Copy the correct values into the sBIT fields, libpng does not do * anything to palette data: */ if (that->colour_type != PNG_COLOR_TYPE_PALETTE) { that->sig_bits = 1; /* The sBIT fields are reset to the values previously sent to * png_set_shift according to the colour type. * does. 
*/ if (that->colour_type & 2) /* RGB channels */ { that->red_sBIT = data.red; that->green_sBIT = data.green; that->blue_sBIT = data.blue; } else /* One grey channel */ that->red_sBIT = that->green_sBIT = that->blue_sBIT = data.gray; that->alpha_sBIT = data.alpha; } this->next->mod(this->next, that, pp, display); } static int image_transform_png_set_shift_add(image_transform *this, const image_transform **that, png_byte colour_type, png_byte bit_depth) { UNUSED(bit_depth) this->next = *that; *that = this; return colour_type != PNG_COLOR_TYPE_PALETTE; } IT(shift); #undef PT #define PT ITSTRUCT(shift) #endif /* PNG_READ_SHIFT_SUPPORTED */ #ifdef THIS_IS_THE_PROFORMA static void image_transform_png_set_@_set(const image_transform *this, transform_display *that, png_structp pp, png_infop pi) { png_set_@(pp); this->next->set(this->next, that, pp, pi); } static void image_transform_png_set_@_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { this->next->mod(this->next, that, pp, display); } static int image_transform_png_set_@_add(image_transform *this, const image_transform **that, png_byte colour_type, png_byte bit_depth) { this->next = *that; *that = this; return 1; } IT(@); #endif /* This may just be 'end' if all the transforms are disabled! */ static image_transform *const image_transform_first = &PT; static void transform_enable(const char *name) { /* Everything starts out enabled, so if we see an 'enable' disabled * everything else the first time round. */ static int all_disabled = 0; int found_it = 0; image_transform *list = image_transform_first; while (list != &image_transform_end) { if (strcmp(list->name, name) == 0) { list->enable = 1; found_it = 1; } else if (!all_disabled) list->enable = 0; list = list->list; } all_disabled = 1; if (!found_it) { fprintf(stderr, "pngvalid: --transform-enable=%s: unknown transform\n", name); exit(99); } }
173,713
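transform_enable itself is a small linked-list walk: enable the named transform and, on the first call only, disable every other entry so that repeated calls build an explicit allow-list. The sketch below reproduces just that walk with a simplified node type; the struct layout is not pngvalid's real image_transform.

#include <stdio.h>
#include <string.h>

struct transform {
    const char *name;
    int enable;
    struct transform *next;
};

/* Enable 'name'; the first call also disables every other entry.
 * Returns 0 if the name was found, -1 otherwise. */
static int enable_transform(struct transform *head, const char *name)
{
    static int all_disabled = 0;
    int found = 0;
    for (struct transform *t = head; t != NULL; t = t->next) {
        if (strcmp(t->name, name) == 0) {
            t->enable = 1;
            found = 1;
        } else if (!all_disabled) {
            t->enable = 0;
        }
    }
    all_disabled = 1;
    return found ? 0 : -1;
}

int main(void)
{
    struct transform c = { "swap",   1, NULL };
    struct transform b = { "bgr",    1, &c  };
    struct transform a = { "expand", 1, &b  };
    enable_transform(&a, "bgr");
    printf("%d %d %d\n", a.enable, b.enable, c.enable);   /* 0 1 0 */
    return 0;
}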
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: xps_encode_font_char_imp(xps_font_t *font, int code) { byte *table; /* no cmap selected: return identity */ if (font->cmapsubtable <= 0) return code; table = font->data + font->cmapsubtable; switch (u16(table)) { case 0: /* Apple standard 1-to-1 mapping. */ return table[code + 6]; case 4: /* Microsoft/Adobe segmented mapping. */ { int segCount2 = u16(table + 6); byte *endCount = table + 14; byte *startCount = endCount + segCount2 + 2; byte *idDelta = startCount + segCount2; byte *idRangeOffset = idDelta + segCount2; int i2; for (i2 = 0; i2 < segCount2 - 3; i2 += 2) { int delta, roff; int start = u16(startCount + i2); continue; delta = s16(idDelta + i2); roff = s16(idRangeOffset + i2); if ( roff == 0 ) { return ( code + delta ) & 0xffff; /* mod 65536 */ return 0; } if ( roff == 0 ) { return ( code + delta ) & 0xffff; /* mod 65536 */ return 0; } glyph = u16(idRangeOffset + i2 + roff + ((code - start) << 1)); return (glyph == 0 ? 0 : glyph + delta); } case 6: /* Single interval lookup. */ { int firstCode = u16(table + 6); int entryCount = u16(table + 8); if ( code < firstCode || code >= firstCode + entryCount ) return 0; return u16(table + 10 + ((code - firstCode) << 1)); } case 10: /* Trimmed array (like 6) */ { int startCharCode = u32(table + 12); int numChars = u32(table + 16); if ( code < startCharCode || code >= startCharCode + numChars ) return 0; return u32(table + 20 + (code - startCharCode) * 4); } case 12: /* Segmented coverage. (like 4) */ { int nGroups = u32(table + 12); byte *group = table + 16; int i; for (i = 0; i < nGroups; i++) { int startCharCode = u32(group + 0); int endCharCode = u32(group + 4); int startGlyphID = u32(group + 8); if ( code < startCharCode ) return 0; if ( code <= endCharCode ) return startGlyphID + (code - startCharCode); group += 12; } return 0; } case 2: /* High-byte mapping through table. */ case 8: /* Mixed 16-bit and 32-bit coverage (like 2) */ default: gs_warn1("unknown cmap format: %d\n", u16(table)); return 0; } return 0; } /* * Given a GID, reverse the CMAP subtable lookup to turn it back into a character code * We need a Unicode return value, so we might need to do some fixed tables for * certain kinds of CMAP subtables (ie non-Unicode ones). That would be a future enhancement * if we ever encounter such a beast. */ static int xps_decode_font_char_imp(xps_font_t *font, int code) { byte *table; /* no cmap selected: return identity */ if (font->cmapsubtable <= 0) return code; table = font->data + font->cmapsubtable; switch (u16(table)) { case 0: /* Apple standard 1-to-1 mapping. */ { int i, length = u16(&table[2]) - 6; if (length < 0 || length > 256) return gs_error_invalidfont; for (i=0;i<length;i++) { if (table[6 + i] == code) return i; } } return 0; case 4: /* Microsoft/Adobe segmented mapping. 
*/ { int segCount2 = u16(table + 6); byte *endCount = table + 14; byte *startCount = endCount + segCount2 + 2; byte *idDelta = startCount + segCount2; byte *idRangeOffset = idDelta + segCount2; int i2; if (segCount2 < 3 || segCount2 > 65535) return gs_error_invalidfont; byte *startCount = endCount + segCount2 + 2; byte *idDelta = startCount + segCount2; byte *idRangeOffset = idDelta + segCount2; int i2; if (segCount2 < 3 || segCount2 > 65535) return gs_error_invalidfont; for (i2 = 0; i2 < segCount2 - 3; i2 += 2) if (roff == 0) { glyph = (i + delta) & 0xffff; } else { glyph = u16(idRangeOffset + i2 + roff + ((i - start) << 1)); } if (glyph == code) { return i; } } } if (roff == 0) { glyph = (i + delta) & 0xffff; } else { glyph = u16(idRangeOffset + i2 + roff + ((i - start) << 1)); } if (glyph == code) { return i; ch = u16(&table[10 + (i * 2)]); if (ch == code) return (firstCode + i); } } return 0; case 10: /* Trimmed array (like 6) */ { unsigned int ch, i, length = u32(&table[20]); int firstCode = u32(&table[16]); for (i=0;i<length;i++) { ch = u16(&table[10 + (i * 2)]); if (ch == code) return (firstCode + i); } } return 0; case 12: /* Segmented coverage. (like 4) */ { unsigned int nGroups = u32(&table[12]); int Group; for (Group=0;Group<nGroups;Group++) { int startCharCode = u32(&table[16 + (Group * 12)]); int endCharCode = u32(&table[16 + (Group * 12) + 4]); int startGlyphCode = u32(&table[16 + (Group * 12) + 8]); if (code >= startGlyphCode && code <= (startGlyphCode + (endCharCode - startCharCode))) { return startGlyphCode + (code - startCharCode); } } } return 0; case 2: /* High-byte mapping through table. */ case 8: /* Mixed 16-bit and 32-bit coverage (like 2) */ default: gs_warn1("unknown cmap format: %d\n", u16(table)); return 0; } Commit Message: CWE ID: CWE-125
xps_encode_font_char_imp(xps_font_t *font, int code) { byte *table; /* no cmap selected: return identity */ if (font->cmapsubtable <= 0) return code; table = font->data + font->cmapsubtable; switch (u16(table)) { case 0: /* Apple standard 1-to-1 mapping. */ return table[code + 6]; case 4: /* Microsoft/Adobe segmented mapping. */ { int segCount2 = u16(table + 6); byte *endCount = table + 14; byte *startCount = endCount + segCount2 + 2; byte *idDelta = startCount + segCount2; byte *idRangeOffset = idDelta + segCount2; byte *giddata; int i2; if (segCount2 < 3 || segCount2 > 65535 || idRangeOffset > font->data + font->length) return gs_error_invalidfont; for (i2 = 0; i2 < segCount2 - 3; i2 += 2) { int delta, roff; int start = u16(startCount + i2); continue; delta = s16(idDelta + i2); roff = s16(idRangeOffset + i2); if ( roff == 0 ) { return ( code + delta ) & 0xffff; /* mod 65536 */ return 0; } if ( roff == 0 ) { return ( code + delta ) & 0xffff; /* mod 65536 */ } if ((giddata = (idRangeOffset + i2 + roff + ((code - start) << 1))) > font->data + font->length) { return code; } glyph = u16(giddata); return (glyph == 0 ? 0 : glyph + delta); } case 6: /* Single interval lookup. */ { int firstCode = u16(table + 6); int entryCount = u16(table + 8); if ( code < firstCode || code >= firstCode + entryCount ) return 0; return u16(table + 10 + ((code - firstCode) << 1)); } case 10: /* Trimmed array (like 6) */ { int startCharCode = u32(table + 12); int numChars = u32(table + 16); if ( code < startCharCode || code >= startCharCode + numChars ) return 0; return u32(table + 20 + (code - startCharCode) * 4); } case 12: /* Segmented coverage. (like 4) */ { int nGroups = u32(table + 12); byte *group = table + 16; int i; for (i = 0; i < nGroups; i++) { int startCharCode = u32(group + 0); int endCharCode = u32(group + 4); int startGlyphID = u32(group + 8); if ( code < startCharCode ) return 0; if ( code <= endCharCode ) return startGlyphID + (code - startCharCode); group += 12; } return 0; } case 2: /* High-byte mapping through table. */ case 8: /* Mixed 16-bit and 32-bit coverage (like 2) */ default: gs_warn1("unknown cmap format: %d\n", u16(table)); return 0; } return 0; } /* * Given a GID, reverse the CMAP subtable lookup to turn it back into a character code * We need a Unicode return value, so we might need to do some fixed tables for * certain kinds of CMAP subtables (ie non-Unicode ones). That would be a future enhancement * if we ever encounter such a beast. */ static int xps_decode_font_char_imp(xps_font_t *font, int code) { byte *table; /* no cmap selected: return identity */ if (font->cmapsubtable <= 0) return code; table = font->data + font->cmapsubtable; switch (u16(table)) { case 0: /* Apple standard 1-to-1 mapping. */ { int i, length = u16(&table[2]) - 6; if (length < 0 || length > 256) return gs_error_invalidfont; for (i=0;i<length;i++) { if (table[6 + i] == code) return i; } } return 0; case 4: /* Microsoft/Adobe segmented mapping. 
*/ { int segCount2 = u16(table + 6); byte *endCount = table + 14; byte *startCount = endCount + segCount2 + 2; byte *idDelta = startCount + segCount2; byte *idRangeOffset = idDelta + segCount2; int i2; if (segCount2 < 3 || segCount2 > 65535) return gs_error_invalidfont; byte *startCount = endCount + segCount2 + 2; byte *idDelta = startCount + segCount2; byte *idRangeOffset = idDelta + segCount2; byte *giddata; int i2; if (segCount2 < 3 || segCount2 > 65535 || idRangeOffset > font->data + font->length) return gs_error_invalidfont; for (i2 = 0; i2 < segCount2 - 3; i2 += 2) if (roff == 0) { glyph = (i + delta) & 0xffff; } else { glyph = u16(idRangeOffset + i2 + roff + ((i - start) << 1)); } if (glyph == code) { return i; } } } if (roff == 0) { glyph = (i + delta) & 0xffff; } else { if ((giddata = (idRangeOffset + i2 + roff + ((i - start) << 1))) > font->data + font->length) { return_error(gs_error_invalidfont); } glyph = u16(giddata); } if (glyph == code) { return i; ch = u16(&table[10 + (i * 2)]); if (ch == code) return (firstCode + i); } } return 0; case 10: /* Trimmed array (like 6) */ { unsigned int ch, i, length = u32(&table[20]); int firstCode = u32(&table[16]); for (i=0;i<length;i++) { ch = u16(&table[10 + (i * 2)]); if (ch == code) return (firstCode + i); } } return 0; case 12: /* Segmented coverage. (like 4) */ { unsigned int nGroups = u32(&table[12]); int Group; for (Group=0;Group<nGroups;Group++) { int startCharCode = u32(&table[16 + (Group * 12)]); int endCharCode = u32(&table[16 + (Group * 12) + 4]); int startGlyphCode = u32(&table[16 + (Group * 12) + 8]); if (code >= startGlyphCode && code <= (startGlyphCode + (endCharCode - startCharCode))) { return startGlyphCode + (code - startCharCode); } } } return 0; case 2: /* High-byte mapping through table. */ case 8: /* Mixed 16-bit and 32-bit coverage (like 2) */ default: gs_warn1("unknown cmap format: %d\n", u16(table)); return 0; }
164,777
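The cmap fix refuses to dereference table pointers once they would pass font->data + font->length. The sketch below reduces that to a bounds-checked 16-bit read from an untrusted buffer; read_u16_checked and the toy table are inventions for the example, not Ghostscript code.

#include <stdint.h>
#include <stdio.h>

/* Read a big-endian 16-bit value at 'offset' only if both bytes lie inside
 * the buffer; otherwise report failure instead of reading past the end. */
static int read_u16_checked(const uint8_t *data, size_t length,
                            size_t offset, unsigned *out)
{
    if (offset > length || length - offset < 2)
        return -1;                    /* would read out of bounds */
    *out = (unsigned)(data[offset] << 8) | data[offset + 1];
    return 0;
}

int main(void)
{
    uint8_t table[8] = { 0, 4, 0, 0, 0, 0, 0, 42 };
    unsigned v = 0;
    printf("%d\n", read_u16_checked(table, sizeof table, 6, &v));  /* 0, v == 42 */
    printf("%d\n", read_u16_checked(table, sizeof table, 7, &v));  /* -1         */
    return 0;
}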
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void ImageInputType::ensurePrimaryContent() { if (!m_useFallbackContent) return; m_useFallbackContent = false; reattachFallbackContent(); } Commit Message: ImageInputType::ensurePrimaryContent should recreate UA shadow tree. Once the fallback shadow tree was created, it was never recreated even if ensurePrimaryContent was called. Such situation happens by updating |src| attribute. BUG=589838 Review URL: https://codereview.chromium.org/1732753004 Cr-Commit-Position: refs/heads/master@{#377804} CWE ID: CWE-361
void ImageInputType::ensurePrimaryContent() { if (!m_useFallbackContent) return; m_useFallbackContent = false; if (ShadowRoot* root = element().userAgentShadowRoot()) root->removeChildren(); createShadowSubtree(); reattachFallbackContent(); }
172,285
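The fix recorded above works because flipping m_useFallbackContent back to false is not enough on its own: the user-agent shadow subtree built for the fallback state is stale and has to be discarded and rebuilt. A minimal sketch of that "tear down derived state when the mode changes" pattern, written in C for consistency with the other notes here and using entirely hypothetical names:

#include <stdlib.h>
#include <string.h>

/* Hypothetical widget with a derived buffer that depends on its mode. */
struct widget {
    int   fallback_mode;
    char *subtree;              /* derived state, built per mode */
};

static void rebuild_subtree(struct widget *w)
{
    free(w->subtree);
    w->subtree = strdup(w->fallback_mode ? "fallback-ui" : "primary-ui");
}

/* Illustrative only: mirror of the Blink fix — when the mode changes, the
 * derived structure is rebuilt rather than reused. */
void ensure_primary(struct widget *w)
{
    if (!w->fallback_mode)
        return;
    w->fallback_mode = 0;
    rebuild_subtree(w);         /* without this, the stale fallback subtree survives */
}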
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int dmarc_process() { int sr, origin; /* used in SPF section */ int dmarc_spf_result = 0; /* stores spf into dmarc conn ctx */ pdkim_signature *sig = NULL; BOOL has_dmarc_record = TRUE; u_char **ruf; /* forensic report addressees, if called for */ /* ACLs have "control=dmarc_disable_verify" */ if (dmarc_disable_verify == TRUE) { dmarc_ar_header = dmarc_auth_results_header(from_header, NULL); return OK; } /* Store the header From: sender domain for this part of DMARC. * If there is no from_header struct, then it's likely this message * is locally generated and relying on fixups to add it. Just skip * the entire DMARC system if we can't find a From: header....or if * there was a previous error. */ if (from_header == NULL || dmarc_abort == TRUE) dmarc_abort = TRUE; else { /* I strongly encourage anybody who can make this better to contact me directly! * <cannonball> Is this an insane way to extract the email address from the From: header? * <jgh_hm> it's sure a horrid layer-crossing.... * <cannonball> I'm not denying that :-/ * <jgh_hm> there may well be no better though */ header_from_sender = expand_string( string_sprintf("${domain:${extract{1}{:}{${addresses:%s}}}}", from_header->text) ); /* The opendmarc library extracts the domain from the email address, but * only try to store it if it's not empty. Otherwise, skip out of DMARC. */ if (strcmp( CCS header_from_sender, "") == 0) dmarc_abort = TRUE; libdm_status = (dmarc_abort == TRUE) ? DMARC_PARSE_OKAY : opendmarc_policy_store_from_domain(dmarc_pctx, header_from_sender); if (libdm_status != DMARC_PARSE_OKAY) { log_write(0, LOG_MAIN|LOG_PANIC, "failure to store header From: in DMARC: %s, header was '%s'", opendmarc_policy_status_to_str(libdm_status), from_header->text); dmarc_abort = TRUE; } } /* Use the envelope sender domain for this part of DMARC */ spf_sender_domain = expand_string(US"$sender_address_domain"); if ( spf_response == NULL ) { /* No spf data means null envelope sender so generate a domain name * from the sender_helo_name */ if (spf_sender_domain == NULL) { spf_sender_domain = sender_helo_name; log_write(0, LOG_MAIN, "DMARC using synthesized SPF sender domain = %s\n", spf_sender_domain); DEBUG(D_receive) debug_printf("DMARC using synthesized SPF sender domain = %s\n", spf_sender_domain); } dmarc_spf_result = DMARC_POLICY_SPF_OUTCOME_NONE; dmarc_spf_ares_result = ARES_RESULT_UNKNOWN; origin = DMARC_POLICY_SPF_ORIGIN_HELO; spf_human_readable = US""; } else { sr = spf_response->result; dmarc_spf_result = (sr == SPF_RESULT_NEUTRAL) ? DMARC_POLICY_SPF_OUTCOME_NONE : (sr == SPF_RESULT_PASS) ? DMARC_POLICY_SPF_OUTCOME_PASS : (sr == SPF_RESULT_FAIL) ? DMARC_POLICY_SPF_OUTCOME_FAIL : (sr == SPF_RESULT_SOFTFAIL) ? DMARC_POLICY_SPF_OUTCOME_TMPFAIL : DMARC_POLICY_SPF_OUTCOME_NONE; dmarc_spf_ares_result = (sr == SPF_RESULT_NEUTRAL) ? ARES_RESULT_NEUTRAL : (sr == SPF_RESULT_PASS) ? ARES_RESULT_PASS : (sr == SPF_RESULT_FAIL) ? ARES_RESULT_FAIL : (sr == SPF_RESULT_SOFTFAIL) ? ARES_RESULT_SOFTFAIL : (sr == SPF_RESULT_NONE) ? ARES_RESULT_NONE : (sr == SPF_RESULT_TEMPERROR) ? ARES_RESULT_TEMPERROR : (sr == SPF_RESULT_PERMERROR) ? 
ARES_RESULT_PERMERROR : ARES_RESULT_UNKNOWN; origin = DMARC_POLICY_SPF_ORIGIN_MAILFROM; spf_human_readable = (uschar *)spf_response->header_comment; DEBUG(D_receive) debug_printf("DMARC using SPF sender domain = %s\n", spf_sender_domain); } if (strcmp( CCS spf_sender_domain, "") == 0) dmarc_abort = TRUE; if (dmarc_abort == FALSE) { libdm_status = opendmarc_policy_store_spf(dmarc_pctx, spf_sender_domain, dmarc_spf_result, origin, spf_human_readable); if (libdm_status != DMARC_PARSE_OKAY) log_write(0, LOG_MAIN|LOG_PANIC, "failure to store spf for DMARC: %s", opendmarc_policy_status_to_str(libdm_status)); } /* Now we cycle through the dkim signature results and put into * the opendmarc context, further building the DMARC reply. */ sig = dkim_signatures; dkim_history_buffer = US""; while (sig != NULL) { int dkim_result, dkim_ares_result, vs, ves; vs = sig->verify_status; ves = sig->verify_ext_status; dkim_result = ( vs == PDKIM_VERIFY_PASS ) ? DMARC_POLICY_DKIM_OUTCOME_PASS : ( vs == PDKIM_VERIFY_FAIL ) ? DMARC_POLICY_DKIM_OUTCOME_FAIL : ( vs == PDKIM_VERIFY_INVALID ) ? DMARC_POLICY_DKIM_OUTCOME_TMPFAIL : DMARC_POLICY_DKIM_OUTCOME_NONE; libdm_status = opendmarc_policy_store_dkim(dmarc_pctx, (uschar *)sig->domain, dkim_result, US""); DEBUG(D_receive) debug_printf("DMARC adding DKIM sender domain = %s\n", sig->domain); if (libdm_status != DMARC_PARSE_OKAY) log_write(0, LOG_MAIN|LOG_PANIC, "failure to store dkim (%s) for DMARC: %s", sig->domain, opendmarc_policy_status_to_str(libdm_status)); dkim_ares_result = ( vs == PDKIM_VERIFY_PASS ) ? ARES_RESULT_PASS : ( vs == PDKIM_VERIFY_FAIL ) ? ARES_RESULT_FAIL : ( vs == PDKIM_VERIFY_NONE ) ? ARES_RESULT_NONE : ( vs == PDKIM_VERIFY_INVALID ) ? ( ves == PDKIM_VERIFY_INVALID_PUBKEY_UNAVAILABLE ? ARES_RESULT_PERMERROR : ves == PDKIM_VERIFY_INVALID_BUFFER_SIZE ? ARES_RESULT_PERMERROR : ves == PDKIM_VERIFY_INVALID_PUBKEY_PARSING ? ARES_RESULT_PERMERROR : ARES_RESULT_UNKNOWN ) : ARES_RESULT_UNKNOWN; dkim_history_buffer = string_sprintf("%sdkim %s %d\n", dkim_history_buffer, sig->domain, dkim_ares_result); sig = sig->next; } libdm_status = opendmarc_policy_query_dmarc(dmarc_pctx, US""); switch (libdm_status) { case DMARC_DNS_ERROR_NXDOMAIN: case DMARC_DNS_ERROR_NO_RECORD: DEBUG(D_receive) debug_printf("DMARC no record found for %s\n", header_from_sender); has_dmarc_record = FALSE; break; case DMARC_PARSE_OKAY: DEBUG(D_receive) debug_printf("DMARC record found for %s\n", header_from_sender); break; case DMARC_PARSE_ERROR_BAD_VALUE: DEBUG(D_receive) debug_printf("DMARC record parse error for %s\n", header_from_sender); has_dmarc_record = FALSE; break; default: /* everything else, skip dmarc */ DEBUG(D_receive) debug_printf("DMARC skipping (%d), unsure what to do with %s", libdm_status, from_header->text); has_dmarc_record = FALSE; break; } /* Can't use exim's string manipulation functions so allocate memory * for libopendmarc using its max hostname length definition. 
*/ uschar *dmarc_domain = (uschar *)calloc(DMARC_MAXHOSTNAMELEN, sizeof(uschar)); libdm_status = opendmarc_policy_fetch_utilized_domain(dmarc_pctx, dmarc_domain, DMARC_MAXHOSTNAMELEN-1); dmarc_used_domain = string_copy(dmarc_domain); free(dmarc_domain); if (libdm_status != DMARC_PARSE_OKAY) { log_write(0, LOG_MAIN|LOG_PANIC, "failure to read domainname used for DMARC lookup: %s", opendmarc_policy_status_to_str(libdm_status)); } libdm_status = opendmarc_get_policy_to_enforce(dmarc_pctx); dmarc_policy = libdm_status; switch(libdm_status) { case DMARC_POLICY_ABSENT: /* No DMARC record found */ dmarc_status = US"norecord"; dmarc_pass_fail = US"none"; dmarc_status_text = US"No DMARC record"; action = DMARC_RESULT_ACCEPT; break; case DMARC_FROM_DOMAIN_ABSENT: /* No From: domain */ dmarc_status = US"nofrom"; dmarc_pass_fail = US"temperror"; dmarc_status_text = US"No From: domain found"; action = DMARC_RESULT_ACCEPT; break; case DMARC_POLICY_NONE: /* Accept and report */ dmarc_status = US"none"; dmarc_pass_fail = US"none"; dmarc_status_text = US"None, Accept"; action = DMARC_RESULT_ACCEPT; break; case DMARC_POLICY_PASS: /* Explicit accept */ dmarc_status = US"accept"; dmarc_pass_fail = US"pass"; dmarc_status_text = US"Accept"; action = DMARC_RESULT_ACCEPT; break; case DMARC_POLICY_REJECT: /* Explicit reject */ dmarc_status = US"reject"; dmarc_pass_fail = US"fail"; dmarc_status_text = US"Reject"; action = DMARC_RESULT_REJECT; break; case DMARC_POLICY_QUARANTINE: /* Explicit quarantine */ dmarc_status = US"quarantine"; dmarc_pass_fail = US"fail"; dmarc_status_text = US"Quarantine"; action = DMARC_RESULT_QUARANTINE; break; default: dmarc_status = US"temperror"; dmarc_pass_fail = US"temperror"; dmarc_status_text = US"Internal Policy Error"; action = DMARC_RESULT_TEMPFAIL; break; } libdm_status = opendmarc_policy_fetch_alignment(dmarc_pctx, &da, &sa); if (libdm_status != DMARC_PARSE_OKAY) { log_write(0, LOG_MAIN|LOG_PANIC, "failure to read DMARC alignment: %s", opendmarc_policy_status_to_str(libdm_status)); } if (has_dmarc_record == TRUE) { log_write(0, LOG_MAIN, "DMARC results: spf_domain=%s dmarc_domain=%s " "spf_align=%s dkim_align=%s enforcement='%s'", spf_sender_domain, dmarc_used_domain, (sa==DMARC_POLICY_SPF_ALIGNMENT_PASS) ?"yes":"no", (da==DMARC_POLICY_DKIM_ALIGNMENT_PASS)?"yes":"no", dmarc_status_text); history_file_status = dmarc_write_history_file(); /* Now get the forensic reporting addresses, if any */ ruf = opendmarc_policy_fetch_ruf(dmarc_pctx, NULL, 0, 1); dmarc_send_forensic_report(ruf); } } Commit Message: CWE ID: CWE-20
int dmarc_process() { int sr, origin; /* used in SPF section */ int dmarc_spf_result = 0; /* stores spf into dmarc conn ctx */ pdkim_signature *sig = NULL; BOOL has_dmarc_record = TRUE; u_char **ruf; /* forensic report addressees, if called for */ /* ACLs have "control=dmarc_disable_verify" */ if (dmarc_disable_verify == TRUE) { dmarc_ar_header = dmarc_auth_results_header(from_header, NULL); return OK; } /* Store the header From: sender domain for this part of DMARC. * If there is no from_header struct, then it's likely this message * is locally generated and relying on fixups to add it. Just skip * the entire DMARC system if we can't find a From: header....or if * there was a previous error. */ if (from_header == NULL || dmarc_abort == TRUE) dmarc_abort = TRUE; else { uschar * errormsg; int dummy, domain; uschar * p; uschar saveend; parse_allow_group = TRUE; p = parse_find_address_end(from_header->text, FALSE); saveend = *p; *p = '\0'; if ((header_from_sender = parse_extract_address(from_header->text, &errormsg, &dummy, &dummy, &domain, FALSE))) header_from_sender += domain; *p = saveend; /* The opendmarc library extracts the domain from the email address, but * only try to store it if it's not empty. Otherwise, skip out of DMARC. */ if (!header_from_sender || (strcmp( CCS header_from_sender, "") == 0)) dmarc_abort = TRUE; libdm_status = dmarc_abort ? DMARC_PARSE_OKAY : opendmarc_policy_store_from_domain(dmarc_pctx, header_from_sender); if (libdm_status != DMARC_PARSE_OKAY) { log_write(0, LOG_MAIN|LOG_PANIC, "failure to store header From: in DMARC: %s, header was '%s'", opendmarc_policy_status_to_str(libdm_status), from_header->text); dmarc_abort = TRUE; } } /* Use the envelope sender domain for this part of DMARC */ spf_sender_domain = expand_string(US"$sender_address_domain"); if ( spf_response == NULL ) { /* No spf data means null envelope sender so generate a domain name * from the sender_helo_name */ if (spf_sender_domain == NULL) { spf_sender_domain = sender_helo_name; log_write(0, LOG_MAIN, "DMARC using synthesized SPF sender domain = %s\n", spf_sender_domain); DEBUG(D_receive) debug_printf("DMARC using synthesized SPF sender domain = %s\n", spf_sender_domain); } dmarc_spf_result = DMARC_POLICY_SPF_OUTCOME_NONE; dmarc_spf_ares_result = ARES_RESULT_UNKNOWN; origin = DMARC_POLICY_SPF_ORIGIN_HELO; spf_human_readable = US""; } else { sr = spf_response->result; dmarc_spf_result = (sr == SPF_RESULT_NEUTRAL) ? DMARC_POLICY_SPF_OUTCOME_NONE : (sr == SPF_RESULT_PASS) ? DMARC_POLICY_SPF_OUTCOME_PASS : (sr == SPF_RESULT_FAIL) ? DMARC_POLICY_SPF_OUTCOME_FAIL : (sr == SPF_RESULT_SOFTFAIL) ? DMARC_POLICY_SPF_OUTCOME_TMPFAIL : DMARC_POLICY_SPF_OUTCOME_NONE; dmarc_spf_ares_result = (sr == SPF_RESULT_NEUTRAL) ? ARES_RESULT_NEUTRAL : (sr == SPF_RESULT_PASS) ? ARES_RESULT_PASS : (sr == SPF_RESULT_FAIL) ? ARES_RESULT_FAIL : (sr == SPF_RESULT_SOFTFAIL) ? ARES_RESULT_SOFTFAIL : (sr == SPF_RESULT_NONE) ? ARES_RESULT_NONE : (sr == SPF_RESULT_TEMPERROR) ? ARES_RESULT_TEMPERROR : (sr == SPF_RESULT_PERMERROR) ? 
ARES_RESULT_PERMERROR : ARES_RESULT_UNKNOWN; origin = DMARC_POLICY_SPF_ORIGIN_MAILFROM; spf_human_readable = (uschar *)spf_response->header_comment; DEBUG(D_receive) debug_printf("DMARC using SPF sender domain = %s\n", spf_sender_domain); } if (strcmp( CCS spf_sender_domain, "") == 0) dmarc_abort = TRUE; if (dmarc_abort == FALSE) { libdm_status = opendmarc_policy_store_spf(dmarc_pctx, spf_sender_domain, dmarc_spf_result, origin, spf_human_readable); if (libdm_status != DMARC_PARSE_OKAY) log_write(0, LOG_MAIN|LOG_PANIC, "failure to store spf for DMARC: %s", opendmarc_policy_status_to_str(libdm_status)); } /* Now we cycle through the dkim signature results and put into * the opendmarc context, further building the DMARC reply. */ sig = dkim_signatures; dkim_history_buffer = US""; while (sig != NULL) { int dkim_result, dkim_ares_result, vs, ves; vs = sig->verify_status; ves = sig->verify_ext_status; dkim_result = ( vs == PDKIM_VERIFY_PASS ) ? DMARC_POLICY_DKIM_OUTCOME_PASS : ( vs == PDKIM_VERIFY_FAIL ) ? DMARC_POLICY_DKIM_OUTCOME_FAIL : ( vs == PDKIM_VERIFY_INVALID ) ? DMARC_POLICY_DKIM_OUTCOME_TMPFAIL : DMARC_POLICY_DKIM_OUTCOME_NONE; libdm_status = opendmarc_policy_store_dkim(dmarc_pctx, (uschar *)sig->domain, dkim_result, US""); DEBUG(D_receive) debug_printf("DMARC adding DKIM sender domain = %s\n", sig->domain); if (libdm_status != DMARC_PARSE_OKAY) log_write(0, LOG_MAIN|LOG_PANIC, "failure to store dkim (%s) for DMARC: %s", sig->domain, opendmarc_policy_status_to_str(libdm_status)); dkim_ares_result = ( vs == PDKIM_VERIFY_PASS ) ? ARES_RESULT_PASS : ( vs == PDKIM_VERIFY_FAIL ) ? ARES_RESULT_FAIL : ( vs == PDKIM_VERIFY_NONE ) ? ARES_RESULT_NONE : ( vs == PDKIM_VERIFY_INVALID ) ? ( ves == PDKIM_VERIFY_INVALID_PUBKEY_UNAVAILABLE ? ARES_RESULT_PERMERROR : ves == PDKIM_VERIFY_INVALID_BUFFER_SIZE ? ARES_RESULT_PERMERROR : ves == PDKIM_VERIFY_INVALID_PUBKEY_PARSING ? ARES_RESULT_PERMERROR : ARES_RESULT_UNKNOWN ) : ARES_RESULT_UNKNOWN; dkim_history_buffer = string_sprintf("%sdkim %s %d\n", dkim_history_buffer, sig->domain, dkim_ares_result); sig = sig->next; } libdm_status = opendmarc_policy_query_dmarc(dmarc_pctx, US""); switch (libdm_status) { case DMARC_DNS_ERROR_NXDOMAIN: case DMARC_DNS_ERROR_NO_RECORD: DEBUG(D_receive) debug_printf("DMARC no record found for %s\n", header_from_sender); has_dmarc_record = FALSE; break; case DMARC_PARSE_OKAY: DEBUG(D_receive) debug_printf("DMARC record found for %s\n", header_from_sender); break; case DMARC_PARSE_ERROR_BAD_VALUE: DEBUG(D_receive) debug_printf("DMARC record parse error for %s\n", header_from_sender); has_dmarc_record = FALSE; break; default: /* everything else, skip dmarc */ DEBUG(D_receive) debug_printf("DMARC skipping (%d), unsure what to do with %s", libdm_status, from_header->text); has_dmarc_record = FALSE; break; } /* Can't use exim's string manipulation functions so allocate memory * for libopendmarc using its max hostname length definition. 
*/ uschar *dmarc_domain = (uschar *)calloc(DMARC_MAXHOSTNAMELEN, sizeof(uschar)); libdm_status = opendmarc_policy_fetch_utilized_domain(dmarc_pctx, dmarc_domain, DMARC_MAXHOSTNAMELEN-1); dmarc_used_domain = string_copy(dmarc_domain); free(dmarc_domain); if (libdm_status != DMARC_PARSE_OKAY) { log_write(0, LOG_MAIN|LOG_PANIC, "failure to read domainname used for DMARC lookup: %s", opendmarc_policy_status_to_str(libdm_status)); } libdm_status = opendmarc_get_policy_to_enforce(dmarc_pctx); dmarc_policy = libdm_status; switch(libdm_status) { case DMARC_POLICY_ABSENT: /* No DMARC record found */ dmarc_status = US"norecord"; dmarc_pass_fail = US"none"; dmarc_status_text = US"No DMARC record"; action = DMARC_RESULT_ACCEPT; break; case DMARC_FROM_DOMAIN_ABSENT: /* No From: domain */ dmarc_status = US"nofrom"; dmarc_pass_fail = US"temperror"; dmarc_status_text = US"No From: domain found"; action = DMARC_RESULT_ACCEPT; break; case DMARC_POLICY_NONE: /* Accept and report */ dmarc_status = US"none"; dmarc_pass_fail = US"none"; dmarc_status_text = US"None, Accept"; action = DMARC_RESULT_ACCEPT; break; case DMARC_POLICY_PASS: /* Explicit accept */ dmarc_status = US"accept"; dmarc_pass_fail = US"pass"; dmarc_status_text = US"Accept"; action = DMARC_RESULT_ACCEPT; break; case DMARC_POLICY_REJECT: /* Explicit reject */ dmarc_status = US"reject"; dmarc_pass_fail = US"fail"; dmarc_status_text = US"Reject"; action = DMARC_RESULT_REJECT; break; case DMARC_POLICY_QUARANTINE: /* Explicit quarantine */ dmarc_status = US"quarantine"; dmarc_pass_fail = US"fail"; dmarc_status_text = US"Quarantine"; action = DMARC_RESULT_QUARANTINE; break; default: dmarc_status = US"temperror"; dmarc_pass_fail = US"temperror"; dmarc_status_text = US"Internal Policy Error"; action = DMARC_RESULT_TEMPFAIL; break; } libdm_status = opendmarc_policy_fetch_alignment(dmarc_pctx, &da, &sa); if (libdm_status != DMARC_PARSE_OKAY) { log_write(0, LOG_MAIN|LOG_PANIC, "failure to read DMARC alignment: %s", opendmarc_policy_status_to_str(libdm_status)); } if (has_dmarc_record == TRUE) { log_write(0, LOG_MAIN, "DMARC results: spf_domain=%s dmarc_domain=%s " "spf_align=%s dkim_align=%s enforcement='%s'", spf_sender_domain, dmarc_used_domain, (sa==DMARC_POLICY_SPF_ALIGNMENT_PASS) ?"yes":"no", (da==DMARC_POLICY_DKIM_ALIGNMENT_PASS)?"yes":"no", dmarc_status_text); history_file_status = dmarc_write_history_file(); /* Now get the forensic reporting addresses, if any */ ruf = opendmarc_policy_fetch_ruf(dmarc_pctx, NULL, 0, 1); dmarc_send_forensic_report(ruf); } }
165,193
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) { unsigned long val; void *ptr = NULL; if (!atomic_pool) { WARN(1, "coherent pool not initialised!\n"); return NULL; } val = gen_pool_alloc(atomic_pool, size); if (val) { phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val); *ret_page = phys_to_page(phys); ptr = (void *)val; if (flags & __GFP_ZERO) memset(ptr, 0, size); } return ptr; } Commit Message: arm64: dma-mapping: always clear allocated buffers Buffers allocated by dma_alloc_coherent() are always zeroed on Alpha, ARM (32bit), MIPS, PowerPC, x86/x86_64 and probably other architectures. It turned out that some drivers rely on this 'feature'. Allocated buffer might be also exposed to userspace with dma_mmap() call, so clearing it is desired from security point of view to avoid exposing random memory to userspace. This patch unifies dma_alloc_coherent() behavior on ARM64 architecture with other implementations by unconditionally zeroing allocated buffer. Cc: <[email protected]> # v3.14+ Signed-off-by: Marek Szyprowski <[email protected]> Signed-off-by: Will Deacon <[email protected]> CWE ID: CWE-200
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) { unsigned long val; void *ptr = NULL; if (!atomic_pool) { WARN(1, "coherent pool not initialised!\n"); return NULL; } val = gen_pool_alloc(atomic_pool, size); if (val) { phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val); *ret_page = phys_to_page(phys); ptr = (void *)val; memset(ptr, 0, size); } return ptr; }
167,470
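The change above makes the arm64 DMA pool clear every allocation unconditionally, so a buffer later exposed to userspace via dma_mmap() can never carry stale kernel memory (CWE-200). The same zero-on-allocate discipline in a plain userspace wrapper, as an illustrative sketch:

#include <stdlib.h>
#include <string.h>

/* Illustrative only: always scrub memory handed out from a shared pool,
 * rather than trusting callers to request zeroing, so data left behind by a
 * previous user of the pool cannot leak. */
void *pool_alloc_zeroed(size_t size)
{
    void *p = malloc(size);
    if (p != NULL)
        memset(p, 0, size);     /* unconditional, mirroring the kernel fix */
    return p;
}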
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ExprResolveBoolean(struct xkb_context *ctx, const ExprDef *expr, bool *set_rtrn) { bool ok = false; const char *ident; switch (expr->expr.op) { case EXPR_VALUE: if (expr->expr.value_type != EXPR_TYPE_BOOLEAN) { log_err(ctx, "Found constant of type %s where boolean was expected\n", expr_value_type_to_string(expr->expr.value_type)); return false; } *set_rtrn = expr->boolean.set; return true; case EXPR_IDENT: ident = xkb_atom_text(ctx, expr->ident.ident); if (ident) { if (istreq(ident, "true") || istreq(ident, "yes") || istreq(ident, "on")) { *set_rtrn = true; return true; } else if (istreq(ident, "false") || istreq(ident, "no") || istreq(ident, "off")) { *set_rtrn = false; return true; } } log_err(ctx, "Identifier \"%s\" of type boolean is unknown\n", ident); return false; case EXPR_FIELD_REF: log_err(ctx, "Default \"%s.%s\" of type boolean is unknown\n", xkb_atom_text(ctx, expr->field_ref.element), xkb_atom_text(ctx, expr->field_ref.field)); return false; case EXPR_INVERT: case EXPR_NOT: ok = ExprResolveBoolean(ctx, expr, set_rtrn); if (ok) *set_rtrn = !*set_rtrn; return ok; case EXPR_ADD: case EXPR_SUBTRACT: case EXPR_MULTIPLY: case EXPR_DIVIDE: case EXPR_ASSIGN: case EXPR_NEGATE: case EXPR_UNARY_PLUS: log_err(ctx, "%s of boolean values not permitted\n", expr_op_type_to_string(expr->expr.op)); break; default: log_wsgo(ctx, "Unknown operator %d in ResolveBoolean\n", expr->expr.op); break; } return false; } Commit Message: xkbcomp: fix stack overflow when evaluating boolean negation The expression evaluator would go into an infinite recursion when evaluating something like this as a boolean: `!True`. Instead of recursing to just `True` and negating, it recursed to `!True` itself again. Bug inherited from xkbcomp. Caught with the afl fuzzer. Signed-off-by: Ran Benita <[email protected]> CWE ID: CWE-400
ExprResolveBoolean(struct xkb_context *ctx, const ExprDef *expr, bool *set_rtrn) { bool ok = false; const char *ident; switch (expr->expr.op) { case EXPR_VALUE: if (expr->expr.value_type != EXPR_TYPE_BOOLEAN) { log_err(ctx, "Found constant of type %s where boolean was expected\n", expr_value_type_to_string(expr->expr.value_type)); return false; } *set_rtrn = expr->boolean.set; return true; case EXPR_IDENT: ident = xkb_atom_text(ctx, expr->ident.ident); if (ident) { if (istreq(ident, "true") || istreq(ident, "yes") || istreq(ident, "on")) { *set_rtrn = true; return true; } else if (istreq(ident, "false") || istreq(ident, "no") || istreq(ident, "off")) { *set_rtrn = false; return true; } } log_err(ctx, "Identifier \"%s\" of type boolean is unknown\n", ident); return false; case EXPR_FIELD_REF: log_err(ctx, "Default \"%s.%s\" of type boolean is unknown\n", xkb_atom_text(ctx, expr->field_ref.element), xkb_atom_text(ctx, expr->field_ref.field)); return false; case EXPR_INVERT: case EXPR_NOT: ok = ExprResolveBoolean(ctx, expr->unary.child, set_rtrn); if (ok) *set_rtrn = !*set_rtrn; return ok; case EXPR_ADD: case EXPR_SUBTRACT: case EXPR_MULTIPLY: case EXPR_DIVIDE: case EXPR_ASSIGN: case EXPR_NEGATE: case EXPR_UNARY_PLUS: log_err(ctx, "%s of boolean values not permitted\n", expr_op_type_to_string(expr->expr.op)); break; default: log_wsgo(ctx, "Unknown operator %d in ResolveBoolean\n", expr->expr.op); break; } return false; }
169,096
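The one-line bug above is a classic unbounded-recursion pattern (CWE-400): the negation case recursed on the node itself instead of its operand, so an expression like !True never made progress. A minimal sketch with a hypothetical AST type, showing the terminating form:

#include <stdbool.h>

/* Hypothetical AST node, for illustration only. */
struct expr {
    enum { EXPR_CONST, EXPR_NOT } op;
    bool value;                 /* valid when op == EXPR_CONST */
    const struct expr *child;   /* valid when op == EXPR_NOT   */
};

/* Correct: the recursive call descends into the child node, so every step
 * strictly shrinks the expression and the recursion terminates. */
bool eval_bool(const struct expr *e)
{
    switch (e->op) {
    case EXPR_CONST:
        return e->value;
    case EXPR_NOT:
        return !eval_bool(e->child);   /* eval_bool(e) here would never make progress */
    }
    return false;
}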
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static PHP_FUNCTION(xmlwriter_open_uri) { char *valid_file = NULL; xmlwriter_object *intern; xmlTextWriterPtr ptr; char *source; char resolved_path[MAXPATHLEN + 1]; int source_len; #ifdef ZEND_ENGINE_2 zval *this = getThis(); ze_xmlwriter_object *ze_obj = NULL; #endif #ifndef ZEND_ENGINE_2 xmlOutputBufferPtr out_buffer; void *ioctx; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &source, &source_len) == FAILURE) { return; } #ifdef ZEND_ENGINE_2 if (this) { /* We do not use XMLWRITER_FROM_OBJECT, xmlwriter init function here */ ze_obj = (ze_xmlwriter_object*) zend_object_store_get_object(this TSRMLS_CC); } #endif if (source_len == 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty string as source"); RETURN_FALSE; } valid_file = _xmlwriter_get_valid_file_path(source, resolved_path, MAXPATHLEN TSRMLS_CC); if (!valid_file) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to resolve file path"); RETURN_FALSE; } /* TODO: Fix either the PHP stream or libxml APIs: it can then detect when a given path is valid and not report out of memory error. Once it is done, remove the directory check in _xmlwriter_get_valid_file_path */ #ifndef ZEND_ENGINE_2 ioctx = php_xmlwriter_streams_IO_open_write_wrapper(valid_file TSRMLS_CC); if (ioctx == NULL) { RETURN_FALSE; } out_buffer = xmlOutputBufferCreateIO(php_xmlwriter_streams_IO_write, php_xmlwriter_streams_IO_close, ioctx, NULL); if (out_buffer == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to create output buffer"); RETURN_FALSE; } ptr = xmlNewTextWriter(out_buffer); #else ptr = xmlNewTextWriterFilename(valid_file, 0); #endif if (!ptr) { RETURN_FALSE; } intern = emalloc(sizeof(xmlwriter_object)); intern->ptr = ptr; intern->output = NULL; #ifndef ZEND_ENGINE_2 intern->uri_output = out_buffer; #else if (this) { if (ze_obj->xmlwriter_ptr) { xmlwriter_free_resource_ptr(ze_obj->xmlwriter_ptr TSRMLS_CC); } ze_obj->xmlwriter_ptr = intern; RETURN_TRUE; } else #endif { ZEND_REGISTER_RESOURCE(return_value,intern,le_xmlwriter); } } Commit Message: CWE ID: CWE-254
static PHP_FUNCTION(xmlwriter_open_uri) { char *valid_file = NULL; xmlwriter_object *intern; xmlTextWriterPtr ptr; char *source; char resolved_path[MAXPATHLEN + 1]; int source_len; #ifdef ZEND_ENGINE_2 zval *this = getThis(); ze_xmlwriter_object *ze_obj = NULL; #endif #ifndef ZEND_ENGINE_2 xmlOutputBufferPtr out_buffer; void *ioctx; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p", &source, &source_len) == FAILURE) { return; } #ifdef ZEND_ENGINE_2 if (this) { /* We do not use XMLWRITER_FROM_OBJECT, xmlwriter init function here */ ze_obj = (ze_xmlwriter_object*) zend_object_store_get_object(this TSRMLS_CC); } #endif if (source_len == 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty string as source"); RETURN_FALSE; } valid_file = _xmlwriter_get_valid_file_path(source, resolved_path, MAXPATHLEN TSRMLS_CC); if (!valid_file) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to resolve file path"); RETURN_FALSE; } /* TODO: Fix either the PHP stream or libxml APIs: it can then detect when a given path is valid and not report out of memory error. Once it is done, remove the directory check in _xmlwriter_get_valid_file_path */ #ifndef ZEND_ENGINE_2 ioctx = php_xmlwriter_streams_IO_open_write_wrapper(valid_file TSRMLS_CC); if (ioctx == NULL) { RETURN_FALSE; } out_buffer = xmlOutputBufferCreateIO(php_xmlwriter_streams_IO_write, php_xmlwriter_streams_IO_close, ioctx, NULL); if (out_buffer == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to create output buffer"); RETURN_FALSE; } ptr = xmlNewTextWriter(out_buffer); #else ptr = xmlNewTextWriterFilename(valid_file, 0); #endif if (!ptr) { RETURN_FALSE; } intern = emalloc(sizeof(xmlwriter_object)); intern->ptr = ptr; intern->output = NULL; #ifndef ZEND_ENGINE_2 intern->uri_output = out_buffer; #else if (this) { if (ze_obj->xmlwriter_ptr) { xmlwriter_free_resource_ptr(ze_obj->xmlwriter_ptr TSRMLS_CC); } ze_obj->xmlwriter_ptr = intern; RETURN_TRUE; } else #endif { ZEND_REGISTER_RESOURCE(return_value,intern,le_xmlwriter); } }
165,318
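The only functional change above is the zend_parse_parameters() format specifier, "s" to "p", which makes PHP reject a path argument containing embedded NUL bytes before it reaches any filesystem call. The same check in standalone form, with invented names:

#include <stddef.h>
#include <string.h>

/* Illustrative only: a path taken from an untrusted caller must not contain
 * embedded NUL bytes, otherwise C string APIs silently truncate it and open a
 * different file than the one that was validated. */
int path_is_sane(const char *buf, size_t reported_len)
{
    /* strlen() stops at the first NUL; if that is shorter than the length the
     * caller reported, a NUL byte was smuggled into the middle of the path. */
    return strlen(buf) == reported_len;
}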
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void RenderFrameImpl::DidAddMessageToConsole( const blink::WebConsoleMessage& message, const blink::WebString& source_name, unsigned source_line, const blink::WebString& stack_trace) { logging::LogSeverity log_severity = logging::LOG_VERBOSE; switch (message.level) { case blink::mojom::ConsoleMessageLevel::kVerbose: log_severity = logging::LOG_VERBOSE; break; case blink::mojom::ConsoleMessageLevel::kInfo: log_severity = logging::LOG_INFO; break; case blink::mojom::ConsoleMessageLevel::kWarning: log_severity = logging::LOG_WARNING; break; case blink::mojom::ConsoleMessageLevel::kError: log_severity = logging::LOG_ERROR; break; default: log_severity = logging::LOG_VERBOSE; } if (ShouldReportDetailedMessageForSource(source_name)) { for (auto& observer : observers_) { observer.DetailedConsoleMessageAdded( message.text.Utf16(), source_name.Utf16(), stack_trace.Utf16(), source_line, static_cast<uint32_t>(log_severity)); } } Send(new FrameHostMsg_DidAddMessageToConsole( routing_id_, static_cast<int32_t>(log_severity), message.text.Utf16(), static_cast<int32_t>(source_line), source_name.Utf16())); } Commit Message: Convert FrameHostMsg_DidAddMessageToConsole to Mojo. Note: Since this required changing the test RenderViewImplTest.DispatchBeforeUnloadCanDetachFrame, I manually re-introduced https://crbug.com/666714 locally (the bug the test was added for), and reran the test to confirm that it still covers the bug. Bug: 786836 Change-Id: I110668fa6f0f261fd2ac36bb91a8d8b31c99f4f1 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1526270 Commit-Queue: Lowell Manners <[email protected]> Reviewed-by: Daniel Cheng <[email protected]> Reviewed-by: Camille Lamy <[email protected]> Cr-Commit-Position: refs/heads/master@{#653137} CWE ID: CWE-416
void RenderFrameImpl::DidAddMessageToConsole( const blink::WebConsoleMessage& message, const blink::WebString& source_name, unsigned source_line, const blink::WebString& stack_trace) { logging::LogSeverity log_severity = logging::LOG_VERBOSE; switch (message.level) { case blink::mojom::ConsoleMessageLevel::kVerbose: log_severity = logging::LOG_VERBOSE; break; case blink::mojom::ConsoleMessageLevel::kInfo: log_severity = logging::LOG_INFO; break; case blink::mojom::ConsoleMessageLevel::kWarning: log_severity = logging::LOG_WARNING; break; case blink::mojom::ConsoleMessageLevel::kError: log_severity = logging::LOG_ERROR; break; default: log_severity = logging::LOG_VERBOSE; } if (ShouldReportDetailedMessageForSource(source_name)) { for (auto& observer : observers_) { observer.DetailedConsoleMessageAdded( message.text.Utf16(), source_name.Utf16(), stack_trace.Utf16(), source_line, static_cast<uint32_t>(log_severity)); } } GetFrameHost()->DidAddMessageToConsole(message.level, message.text.Utf16(), static_cast<int32_t>(source_line), source_name.Utf16()); }
172,488
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readargs *args) { unsigned int len; int v; u32 max_blocksize = svc_max_payload(rqstp); p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); len = min(args->count, max_blocksize); /* set up the kvec */ v=0; while (len > 0) { struct page *p = *(rqstp->rq_next_page++); rqstp->rq_vec[v].iov_base = page_address(p); rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); len -= rqstp->rq_vec[v].iov_len; v++; } args->vlen = v; return xdr_argsize_check(rqstp, p); } Commit Message: Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux Pull nfsd updates from Bruce Fields: "Another RDMA update from Chuck Lever, and a bunch of miscellaneous bugfixes" * tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits) nfsd: Fix up the "supattr_exclcreat" attributes nfsd: encoders mustn't use unitialized values in error cases nfsd: fix undefined behavior in nfsd4_layout_verify lockd: fix lockd shutdown race NFSv4: Fix callback server shutdown SUNRPC: Refactor svc_set_num_threads() NFSv4.x/callback: Create the callback service through svc_create_pooled lockd: remove redundant check on block svcrdma: Clean out old XDR encoders svcrdma: Remove the req_map cache svcrdma: Remove unused RDMA Write completion handler svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt svcrdma: Clean up RPC-over-RDMA backchannel reply processing svcrdma: Report Write/Reply chunk overruns svcrdma: Clean up RDMA_ERROR path svcrdma: Use rdma_rw API in RPC reply path svcrdma: Introduce local rdma_rw API helpers svcrdma: Clean up svc_rdma_get_inv_rkey() svcrdma: Add helper to save pages under I/O svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT ... CWE ID: CWE-404
nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_readargs *args) { unsigned int len; int v; u32 max_blocksize = svc_max_payload(rqstp); p = decode_fh(p, &args->fh); if (!p) return 0; p = xdr_decode_hyper(p, &args->offset); args->count = ntohl(*p++); if (!xdr_argsize_check(rqstp, p)) return 0; len = min(args->count, max_blocksize); /* set up the kvec */ v=0; while (len > 0) { struct page *p = *(rqstp->rq_next_page++); rqstp->rq_vec[v].iov_base = page_address(p); rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE); len -= rqstp->rq_vec[v].iov_len; v++; } args->vlen = v; return 1; }
168,140
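The fix above moves xdr_argsize_check() ahead of the loop that consumes pages from rq_next_page, so a malformed READ request is rejected before any resources are claimed (CWE-404). The general shape — validate the untrusted header first, commit resources only afterwards — in a hedged sketch with a hypothetical decoder:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical wire-format decoder, for illustration only.  It mirrors the
 * shape of the NFS fix: check the untrusted header length first, and only
 * then start consuming buffers, so a short request leaves no state behind. */
struct req {
    const uint8_t *p, *end;   /* unparsed wire data                  */
    size_t pages_claimed;     /* resources committed to this request */
};

static uint32_t get_u32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int decode_read_args(struct req *r, uint64_t *offset, uint32_t *count)
{
    if ((size_t)(r->end - r->p) < 12)
        return -1;                         /* reject before claiming anything */

    *offset = ((uint64_t)get_u32(r->p) << 32) | get_u32(r->p + 4);
    *count  = get_u32(r->p + 8);
    r->p += 12;

    r->pages_claimed = (*count + 4095) / 4096;   /* commit only after checks */
    return 0;
}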
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray) { u_short type, class, dlen; u_long ttl; long n, i; u_short s; u_char *tp, *p; char name[MAXHOSTNAMELEN]; int have_v6_break = 0, in_v6_break = 0; *subarray = NULL; n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, sizeof(name) - 2); if (n < 0) { return NULL; } cp += n; GETSHORT(type, cp); GETSHORT(class, cp); GETLONG(ttl, cp); GETSHORT(dlen, cp); if (type_to_fetch != T_ANY && type != type_to_fetch) { cp += dlen; return cp; } if (!store) { cp += dlen; return cp; } ALLOC_INIT_ZVAL(*subarray); array_init(*subarray); add_assoc_string(*subarray, "host", name, 1); add_assoc_string(*subarray, "class", "IN", 1); add_assoc_long(*subarray, "ttl", ttl); if (raw) { add_assoc_long(*subarray, "type", type); add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1); cp += dlen; return cp; } switch (type) { case DNS_T_A: add_assoc_string(*subarray, "type", "A", 1); snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]); add_assoc_string(*subarray, "ip", name, 1); cp += dlen; break; case DNS_T_MX: add_assoc_string(*subarray, "type", "MX", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); /* no break; */ case DNS_T_CNAME: if (type == DNS_T_CNAME) { add_assoc_string(*subarray, "type", "CNAME", 1); } /* no break; */ case DNS_T_NS: if (type == DNS_T_NS) { add_assoc_string(*subarray, "type", "NS", 1); } /* no break; */ case DNS_T_PTR: if (type == DNS_T_PTR) { add_assoc_string(*subarray, "type", "PTR", 1); } n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_HINFO: /* See RFC 1010 for values */ add_assoc_string(*subarray, "type", "HINFO", 1); n = *cp & 0xFF; cp++; add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1); cp += n; n = *cp & 0xFF; cp++; add_assoc_stringl(*subarray, "os", (char*)cp, n, 1); cp += n; break; case DNS_T_TXT: { int ll = 0; zval *entries = NULL; add_assoc_string(*subarray, "type", "TXT", 1); tp = emalloc(dlen + 1); MAKE_STD_ZVAL(entries); array_init(entries); while (ll < dlen) { n = cp[ll]; if ((ll + n) >= dlen) { n = dlen - (ll + 1); } memcpy(tp + ll , cp + ll + 1, n); add_next_index_stringl(entries, cp + ll + 1, n, 1); ll = ll + n + 1; } tp[dlen] = '\0'; cp += dlen; add_assoc_stringl(*subarray, "txt", tp, (dlen>0)?dlen - 1:0, 0); add_assoc_zval(*subarray, "entries", entries); } break; case DNS_T_SOA: add_assoc_string(*subarray, "type", "SOA", 1); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "mname", name, 1); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "rname", name, 1); GETLONG(n, cp); add_assoc_long(*subarray, "serial", n); GETLONG(n, cp); add_assoc_long(*subarray, "refresh", n); GETLONG(n, cp); add_assoc_long(*subarray, "retry", n); GETLONG(n, cp); add_assoc_long(*subarray, "expire", n); GETLONG(n, cp); add_assoc_long(*subarray, "minimum-ttl", n); break; case DNS_T_AAAA: tp = (u_char*)name; for(i=0; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && 
in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "type", "AAAA", 1); add_assoc_string(*subarray, "ipv6", name, 1); break; case DNS_T_A6: p = cp; add_assoc_string(*subarray, "type", "A6", 1); n = ((int)cp[0]) & 0xFF; cp++; add_assoc_long(*subarray, "masklen", n); tp = (u_char*)name; if (n > 15) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } if (n % 16 > 8) { /* Partial short */ if (cp[0] != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } sprintf((char*)tp, "%x", cp[0] & 0xFF); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } cp++; } for (i = (n + 8) / 16; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "ipv6", name, 1); if (cp < p + dlen) { n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "chain", name, 1); } break; case DNS_T_SRV: add_assoc_string(*subarray, "type", "SRV", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); GETSHORT(n, cp); add_assoc_long(*subarray, "weight", n); GETSHORT(n, cp); add_assoc_long(*subarray, "port", n); n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_NAPTR: add_assoc_string(*subarray, "type", "NAPTR", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "order", n); GETSHORT(n, cp); add_assoc_long(*subarray, "pref", n); n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "flags", (char*)++cp, n, 1); cp += n; n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "services", (char*)++cp, n, 1); cp += n; n = (cp[0] & 0xFF); add_assoc_stringl(*subarray, "regex", (char*)++cp, n, 1); cp += n; n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "replacement", name, 1); break; default: zval_ptr_dtor(subarray); *subarray = NULL; cp += dlen; break; } return cp; } Commit Message: Fixed Sec Bug #67717 segfault in dns_get_record CVE-2014-3597 Incomplete fix for CVE-2014-4049 Check possible buffer overflow - pass real buffer end to dn_expand calls - check buffer len before each read CWE ID: CWE-119
static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray) static u_char *php_parserr(u_char *cp, u_char *end, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray) { u_short type, class, dlen; u_long ttl; long n, i; u_short s; u_char *tp, *p; char name[MAXHOSTNAMELEN]; int have_v6_break = 0, in_v6_break = 0; *subarray = NULL; n = dn_expand(answer->qb2, end, cp, name, sizeof(name) - 2); if (n < 0) { return NULL; } cp += n; CHECKCP(10); GETSHORT(type, cp); GETSHORT(class, cp); GETLONG(ttl, cp); GETSHORT(dlen, cp); CHECKCP(dlen); if (type_to_fetch != T_ANY && type != type_to_fetch) { cp += dlen; return cp; } if (!store) { cp += dlen; return cp; } ALLOC_INIT_ZVAL(*subarray); array_init(*subarray); add_assoc_string(*subarray, "host", name, 1); add_assoc_string(*subarray, "class", "IN", 1); add_assoc_long(*subarray, "ttl", ttl); if (raw) { add_assoc_long(*subarray, "type", type); add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1); cp += dlen; return cp; } switch (type) { case DNS_T_A: CHECKCP(4); add_assoc_string(*subarray, "type", "A", 1); snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]); add_assoc_string(*subarray, "ip", name, 1); cp += dlen; break; case DNS_T_MX: CHECKCP(2); add_assoc_string(*subarray, "type", "MX", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); /* no break; */ case DNS_T_CNAME: if (type == DNS_T_CNAME) { add_assoc_string(*subarray, "type", "CNAME", 1); } /* no break; */ case DNS_T_NS: if (type == DNS_T_NS) { add_assoc_string(*subarray, "type", "NS", 1); } /* no break; */ case DNS_T_PTR: if (type == DNS_T_PTR) { add_assoc_string(*subarray, "type", "PTR", 1); } n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_HINFO: /* See RFC 1010 for values */ add_assoc_string(*subarray, "type", "HINFO", 1); CHECKCP(1); n = *cp & 0xFF; cp++; CHECKCP(n); add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1); cp += n; CHECKCP(1); n = *cp & 0xFF; cp++; CHECKCP(n); add_assoc_stringl(*subarray, "os", (char*)cp, n, 1); cp += n; break; case DNS_T_TXT: { int l1 = 0, l2 = 0; zval *entries = NULL; add_assoc_string(*subarray, "type", "TXT", 1); tp = emalloc(dlen + 1); MAKE_STD_ZVAL(entries); array_init(entries); while (l1 < dlen) { n = cp[l1]; if ((l1 + n) >= dlen) { n = dlen - (l1 + 1); } if (n) { memcpy(tp + l2 , cp + l1 + 1, n); add_next_index_stringl(entries, cp + l1 + 1, n, 1); } l1 = l1 + n + 1; l2 = l2 + n; } tp[l2] = '\0'; cp += dlen; add_assoc_stringl(*subarray, "txt", tp, l2, 0); add_assoc_zval(*subarray, "entries", entries); } break; case DNS_T_SOA: add_assoc_string(*subarray, "type", "SOA", 1); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "mname", name, 1); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "rname", name, 1); CHECKCP(5*4); GETLONG(n, cp); add_assoc_long(*subarray, "serial", n); GETLONG(n, cp); add_assoc_long(*subarray, "refresh", n); GETLONG(n, cp); add_assoc_long(*subarray, "retry", n); GETLONG(n, cp); add_assoc_long(*subarray, "expire", n); GETLONG(n, cp); add_assoc_long(*subarray, "minimum-ttl", n); break; case DNS_T_AAAA: tp = (u_char*)name; CHECKCP(8*2); for(i=0; i < 8; i++) { GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += 
sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "type", "AAAA", 1); add_assoc_string(*subarray, "ipv6", name, 1); break; case DNS_T_A6: p = cp; add_assoc_string(*subarray, "type", "A6", 1); CHECKCP(1); n = ((int)cp[0]) & 0xFF; cp++; add_assoc_long(*subarray, "masklen", n); tp = (u_char*)name; if (n > 15) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } if (n % 16 > 8) { /* Partial short */ if (cp[0] != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } sprintf((char*)tp, "%x", cp[0] & 0xFF); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } cp++; } for (i = (n + 8) / 16; i < 8; i++) { CHECKCP(2); GETSHORT(s, cp); if (s != 0) { if (tp > (u_char *)name) { in_v6_break = 0; tp[0] = ':'; tp++; } tp += sprintf((char*)tp,"%x",s); } else { if (!have_v6_break) { have_v6_break = 1; in_v6_break = 1; tp[0] = ':'; tp++; } else if (!in_v6_break) { tp[0] = ':'; tp++; tp[0] = '0'; tp++; } } } if (have_v6_break && in_v6_break) { tp[0] = ':'; tp++; } tp[0] = '\0'; add_assoc_string(*subarray, "ipv6", name, 1); if (cp < p + dlen) { n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "chain", name, 1); } break; case DNS_T_SRV: CHECKCP(3*2); add_assoc_string(*subarray, "type", "SRV", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "pri", n); GETSHORT(n, cp); add_assoc_long(*subarray, "weight", n); GETSHORT(n, cp); add_assoc_long(*subarray, "port", n); n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "target", name, 1); break; case DNS_T_NAPTR: CHECKCP(2*2); add_assoc_string(*subarray, "type", "NAPTR", 1); GETSHORT(n, cp); add_assoc_long(*subarray, "order", n); GETSHORT(n, cp); add_assoc_long(*subarray, "pref", n); CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "flags", (char*)cp, n, 1); cp += n; CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "services", (char*)cp, n, 1); cp += n; CHECKCP(1); n = (cp[0] & 0xFF); cp++; CHECKCP(n); add_assoc_stringl(*subarray, "regex", (char*)cp, n, 1); cp += n; n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2); if (n < 0) { return NULL; } cp += n; add_assoc_string(*subarray, "replacement", name, 1); break; default: zval_ptr_dtor(subarray); *subarray = NULL; cp += dlen; break; } return cp; }
166,354
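The patched php_parserr() threads the real buffer end into every dn_expand() call and guards each raw read with a CHECKCP(n) length check. The core idiom — one macro that refuses to read past a known end pointer — is shown below; the macro name comes from the patch, everything else is invented for illustration:

#include <stddef.h>

/* Illustrative parser fragment.  Every read from the untrusted buffer first
 * verifies that n bytes remain between the cursor and the end pointer. */
#define CHECKCP(n)  do { if ((size_t)(end - cp) < (size_t)(n)) return -1; } while (0)

int parse_record(const unsigned char *cp, const unsigned char *end,
                 unsigned *type, unsigned *dlen)
{
    CHECKCP(4);
    *type = (cp[0] << 8) | cp[1];
    *dlen = (cp[2] << 8) | cp[3];
    cp += 4;

    CHECKCP(*dlen);             /* the payload length itself is attacker data */
    return 0;
}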
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: SPL_METHOD(SplFileObject, hasChildren) { if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_FALSE; } /* }}} */ /* {{{ proto bool SplFileObject::getChildren() Commit Message: Fix bug #72262 - do not overflow int CWE ID: CWE-190
SPL_METHOD(SplFileObject, hasChildren) { if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_FALSE; } /* }}} */ /* {{{ proto bool SplFileObject::getChildren()
167,060
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name; const int nonblock = flags & MSG_DONTWAIT; struct sk_buff *skb = NULL; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); unsigned long cpu_flags; size_t copied = 0; u32 peek_seq = 0; u32 *seq; unsigned long used; int target; /* Read at least this many bytes */ long timeo; lock_sock(sk); copied = -ENOTCONN; if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) goto out; timeo = sock_rcvtimeo(sk, nonblock); seq = &llc->copied_seq; if (flags & MSG_PEEK) { peek_seq = llc->copied_seq; seq = &peek_seq; } target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); copied = 0; do { u32 offset; /* * We need to check signals first, to get correct SIGURG * handling. FIXME: Need to check this doesn't impact 1003.1g * and move it down to the bottom of the loop */ if (signal_pending(current)) { if (copied) break; copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; break; } /* Next get a buffer. */ skb = skb_peek(&sk->sk_receive_queue); if (skb) { offset = *seq; goto found_ok_skb; } /* Well, if we have backlog, try to process it now yet. */ if (copied >= target && !sk->sk_backlog.tail) break; if (copied) { if (sk->sk_err || sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo || (flags & MSG_PEEK)) break; } else { if (sock_flag(sk, SOCK_DONE)) break; if (sk->sk_err) { copied = sock_error(sk); break; } if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSE) { if (!sock_flag(sk, SOCK_DONE)) { /* * This occurs when user tries to read * from never connected socket. */ copied = -ENOTCONN; break; } break; } if (!timeo) { copied = -EAGAIN; break; } } if (copied >= target) { /* Do not sleep, just process backlog. */ release_sock(sk); lock_sock(sk); } else sk_wait_data(sk, &timeo); if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n", current->comm, task_pid_nr(current)); peek_seq = llc->copied_seq; } continue; found_ok_skb: /* Ok so how much can we use? */ used = skb->len - offset; if (len < used) used = len; if (!(flags & MSG_TRUNC)) { int rc = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, used); if (rc) { /* Exception. Bailout! 
*/ if (!copied) copied = -EFAULT; break; } } *seq += used; copied += used; len -= used; /* For non stream protcols we get one packet per recvmsg call */ if (sk->sk_type != SOCK_STREAM) goto copy_uaddr; if (!(flags & MSG_PEEK)) { spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); sk_eat_skb(sk, skb, false); spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } /* Partial read */ if (used + offset < skb->len) continue; } while (len > 0); out: release_sock(sk); return copied; copy_uaddr: if (uaddr != NULL && skb != NULL) { memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); msg->msg_namelen = sizeof(*uaddr); } if (llc_sk(sk)->cmsg_flags) llc_cmsg_rcv(msg, skb); if (!(flags & MSG_PEEK)) { spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); sk_eat_skb(sk, skb, false); spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } goto out; } Commit Message: llc: Fix missing msg_namelen update in llc_ui_recvmsg() For stream sockets the code misses to update the msg_namelen member to 0 and therefore makes net/socket.c leak the local, uninitialized sockaddr_storage variable to userland -- 128 bytes of kernel stack memory. The msg_namelen update is also missing for datagram sockets in case the socket is shutting down during receive. Fix both issues by setting msg_namelen to 0 early. It will be updated later if we're going to fill the msg_name member. Cc: Arnaldo Carvalho de Melo <[email protected]> Signed-off-by: Mathias Krause <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-200
static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name; const int nonblock = flags & MSG_DONTWAIT; struct sk_buff *skb = NULL; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); unsigned long cpu_flags; size_t copied = 0; u32 peek_seq = 0; u32 *seq; unsigned long used; int target; /* Read at least this many bytes */ long timeo; msg->msg_namelen = 0; lock_sock(sk); copied = -ENOTCONN; if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) goto out; timeo = sock_rcvtimeo(sk, nonblock); seq = &llc->copied_seq; if (flags & MSG_PEEK) { peek_seq = llc->copied_seq; seq = &peek_seq; } target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); copied = 0; do { u32 offset; /* * We need to check signals first, to get correct SIGURG * handling. FIXME: Need to check this doesn't impact 1003.1g * and move it down to the bottom of the loop */ if (signal_pending(current)) { if (copied) break; copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; break; } /* Next get a buffer. */ skb = skb_peek(&sk->sk_receive_queue); if (skb) { offset = *seq; goto found_ok_skb; } /* Well, if we have backlog, try to process it now yet. */ if (copied >= target && !sk->sk_backlog.tail) break; if (copied) { if (sk->sk_err || sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo || (flags & MSG_PEEK)) break; } else { if (sock_flag(sk, SOCK_DONE)) break; if (sk->sk_err) { copied = sock_error(sk); break; } if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSE) { if (!sock_flag(sk, SOCK_DONE)) { /* * This occurs when user tries to read * from never connected socket. */ copied = -ENOTCONN; break; } break; } if (!timeo) { copied = -EAGAIN; break; } } if (copied >= target) { /* Do not sleep, just process backlog. */ release_sock(sk); lock_sock(sk); } else sk_wait_data(sk, &timeo); if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n", current->comm, task_pid_nr(current)); peek_seq = llc->copied_seq; } continue; found_ok_skb: /* Ok so how much can we use? */ used = skb->len - offset; if (len < used) used = len; if (!(flags & MSG_TRUNC)) { int rc = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, used); if (rc) { /* Exception. Bailout! */ if (!copied) copied = -EFAULT; break; } } *seq += used; copied += used; len -= used; /* For non stream protcols we get one packet per recvmsg call */ if (sk->sk_type != SOCK_STREAM) goto copy_uaddr; if (!(flags & MSG_PEEK)) { spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); sk_eat_skb(sk, skb, false); spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } /* Partial read */ if (used + offset < skb->len) continue; } while (len > 0); out: release_sock(sk); return copied; copy_uaddr: if (uaddr != NULL && skb != NULL) { memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); msg->msg_namelen = sizeof(*uaddr); } if (llc_sk(sk)->cmsg_flags) llc_cmsg_rcv(msg, skb); if (!(flags & MSG_PEEK)) { spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); sk_eat_skb(sk, skb, false); spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } goto out; }
166,036
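The fix above sets msg->msg_namelen = 0 before any early exit because the generic socket layer copies msg_name/msg_namelen back to userspace even when the protocol never fills them in, which otherwise leaks uninitialized kernel stack (CWE-200). The defensive pattern — initialize every caller-visible output before the first return path — as a small illustrative sketch:

#include <string.h>

struct addr_out {
    char data[128];
    int  len;
};

/* Illustrative only: zero the output the caller will copy onward *before*
 * any early return, then overwrite it on the success path. */
int get_peer_name(int connected, struct addr_out *out)
{
    out->len = 0;                       /* never expose stack/heap garbage */

    if (!connected)
        return -1;                      /* early exit: out is still sane */

    memset(out->data, 0, sizeof(out->data));
    memcpy(out->data, "peer", 4);
    out->len = 4;
    return 0;
}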
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: svcauth_gss_accept_sec_context(struct svc_req *rqst, struct rpc_gss_init_res *gr) { struct svc_rpc_gss_data *gd; struct rpc_gss_cred *gc; gss_buffer_desc recv_tok, seqbuf; gss_OID mech; OM_uint32 maj_stat = 0, min_stat = 0, ret_flags, seq; log_debug("in svcauth_gss_accept_context()"); gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth); gc = (struct rpc_gss_cred *)rqst->rq_clntcred; memset(gr, 0, sizeof(*gr)); /* Deserialize arguments. */ memset(&recv_tok, 0, sizeof(recv_tok)); if (!svc_getargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok)) return (FALSE); gr->gr_major = gss_accept_sec_context(&gr->gr_minor, &gd->ctx, svcauth_gss_creds, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &gd->client_name, &mech, &gr->gr_token, &ret_flags, NULL, NULL); svc_freeargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok); log_status("accept_sec_context", gr->gr_major, gr->gr_minor); if (gr->gr_major != GSS_S_COMPLETE && gr->gr_major != GSS_S_CONTINUE_NEEDED) { badauth(gr->gr_major, gr->gr_minor, rqst->rq_xprt); gd->ctx = GSS_C_NO_CONTEXT; goto errout; } /* * ANDROS: krb5 mechglue returns ctx of size 8 - two pointers, * one to the mechanism oid, one to the internal_ctx_id */ if ((gr->gr_ctx.value = mem_alloc(sizeof(gss_union_ctx_id_desc))) == NULL) { fprintf(stderr, "svcauth_gss_accept_context: out of memory\n"); goto errout; } memcpy(gr->gr_ctx.value, gd->ctx, sizeof(gss_union_ctx_id_desc)); gr->gr_ctx.length = sizeof(gss_union_ctx_id_desc); /* gr->gr_win = 0x00000005; ANDROS: for debugging linux kernel version... */ gr->gr_win = sizeof(gd->seqmask) * 8; /* Save client info. */ gd->sec.mech = mech; gd->sec.qop = GSS_C_QOP_DEFAULT; gd->sec.svc = gc->gc_svc; gd->seq = gc->gc_seq; gd->win = gr->gr_win; if (gr->gr_major == GSS_S_COMPLETE) { #ifdef SPKM /* spkm3: no src_name (anonymous) */ if(!g_OID_equal(gss_mech_spkm3, mech)) { #endif maj_stat = gss_display_name(&min_stat, gd->client_name, &gd->cname, &gd->sec.mech); #ifdef SPKM } #endif if (maj_stat != GSS_S_COMPLETE) { log_status("display_name", maj_stat, min_stat); goto errout; } #ifdef DEBUG #ifdef HAVE_HEIMDAL log_debug("accepted context for %.*s with " "<mech {}, qop %d, svc %d>", gd->cname.length, (char *)gd->cname.value, gd->sec.qop, gd->sec.svc); #else { gss_buffer_desc mechname; gss_oid_to_str(&min_stat, mech, &mechname); log_debug("accepted context for %.*s with " "<mech %.*s, qop %d, svc %d>", gd->cname.length, (char *)gd->cname.value, mechname.length, (char *)mechname.value, gd->sec.qop, gd->sec.svc); gss_release_buffer(&min_stat, &mechname); } #endif #endif /* DEBUG */ seq = htonl(gr->gr_win); seqbuf.value = &seq; seqbuf.length = sizeof(seq); gss_release_buffer(&min_stat, &gd->checksum); maj_stat = gss_sign(&min_stat, gd->ctx, GSS_C_QOP_DEFAULT, &seqbuf, &gd->checksum); if (maj_stat != GSS_S_COMPLETE) { goto errout; } rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS; rqst->rq_xprt->xp_verf.oa_base = gd->checksum.value; rqst->rq_xprt->xp_verf.oa_length = gd->checksum.length; } return (TRUE); errout: gss_release_buffer(&min_stat, &gr->gr_token); return (FALSE); } Commit Message: Fix gssrpc data leakage [CVE-2014-9423] [MITKRB5-SA-2015-001] In svcauth_gss_accept_sec_context(), do not copy bytes from the union context into the handle field we send to the client. We do not use this handle field, so just supply a fixed string of "xxxx". In gss_union_ctx_id_struct, remove the unused "interposer" field which was causing part of the union context to remain uninitialized. ticket: 8058 (new) target_version: 1.13.1 tags: pullup CWE ID: CWE-200
svcauth_gss_accept_sec_context(struct svc_req *rqst, struct rpc_gss_init_res *gr) { struct svc_rpc_gss_data *gd; struct rpc_gss_cred *gc; gss_buffer_desc recv_tok, seqbuf; gss_OID mech; OM_uint32 maj_stat = 0, min_stat = 0, ret_flags, seq; log_debug("in svcauth_gss_accept_context()"); gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth); gc = (struct rpc_gss_cred *)rqst->rq_clntcred; memset(gr, 0, sizeof(*gr)); /* Deserialize arguments. */ memset(&recv_tok, 0, sizeof(recv_tok)); if (!svc_getargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok)) return (FALSE); gr->gr_major = gss_accept_sec_context(&gr->gr_minor, &gd->ctx, svcauth_gss_creds, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &gd->client_name, &mech, &gr->gr_token, &ret_flags, NULL, NULL); svc_freeargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok); log_status("accept_sec_context", gr->gr_major, gr->gr_minor); if (gr->gr_major != GSS_S_COMPLETE && gr->gr_major != GSS_S_CONTINUE_NEEDED) { badauth(gr->gr_major, gr->gr_minor, rqst->rq_xprt); gd->ctx = GSS_C_NO_CONTEXT; goto errout; } gr->gr_ctx.value = "xxxx"; gr->gr_ctx.length = 4; /* gr->gr_win = 0x00000005; ANDROS: for debugging linux kernel version... */ gr->gr_win = sizeof(gd->seqmask) * 8; /* Save client info. */ gd->sec.mech = mech; gd->sec.qop = GSS_C_QOP_DEFAULT; gd->sec.svc = gc->gc_svc; gd->seq = gc->gc_seq; gd->win = gr->gr_win; if (gr->gr_major == GSS_S_COMPLETE) { #ifdef SPKM /* spkm3: no src_name (anonymous) */ if(!g_OID_equal(gss_mech_spkm3, mech)) { #endif maj_stat = gss_display_name(&min_stat, gd->client_name, &gd->cname, &gd->sec.mech); #ifdef SPKM } #endif if (maj_stat != GSS_S_COMPLETE) { log_status("display_name", maj_stat, min_stat); goto errout; } #ifdef DEBUG #ifdef HAVE_HEIMDAL log_debug("accepted context for %.*s with " "<mech {}, qop %d, svc %d>", gd->cname.length, (char *)gd->cname.value, gd->sec.qop, gd->sec.svc); #else { gss_buffer_desc mechname; gss_oid_to_str(&min_stat, mech, &mechname); log_debug("accepted context for %.*s with " "<mech %.*s, qop %d, svc %d>", gd->cname.length, (char *)gd->cname.value, mechname.length, (char *)mechname.value, gd->sec.qop, gd->sec.svc); gss_release_buffer(&min_stat, &mechname); } #endif #endif /* DEBUG */ seq = htonl(gr->gr_win); seqbuf.value = &seq; seqbuf.length = sizeof(seq); gss_release_buffer(&min_stat, &gd->checksum); maj_stat = gss_sign(&min_stat, gd->ctx, GSS_C_QOP_DEFAULT, &seqbuf, &gd->checksum); if (maj_stat != GSS_S_COMPLETE) { goto errout; } rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS; rqst->rq_xprt->xp_verf.oa_base = gd->checksum.value; rqst->rq_xprt->xp_verf.oa_length = gd->checksum.length; } return (TRUE); errout: gss_release_buffer(&min_stat, &gr->gr_token); return (FALSE); }
166,788
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: set_string_2_svc(sstring_arg *arg, struct svc_req *rqstp) { static generic_ret ret; char *prime_arg; gss_buffer_desc client_name, service_name; OM_uint32 minor_stat; kadm5_server_handle_t handle; const char *errmsg = NULL; xdr_free(xdr_generic_ret, &ret); if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle))) goto exit_func; if ((ret.code = check_handle((void *)handle))) goto exit_func; ret.api_version = handle->api_version; if (setup_gss_names(rqstp, &client_name, &service_name) < 0) { ret.code = KADM5_FAILURE; goto exit_func; } if (krb5_unparse_name(handle->context, arg->princ, &prime_arg)) { ret.code = KADM5_BAD_PRINCIPAL; goto exit_func; } if (CHANGEPW_SERVICE(rqstp) || !kadm5int_acl_check(handle->context, rqst2name(rqstp), ACL_MODIFY, arg->princ, NULL)) { ret.code = KADM5_AUTH_MODIFY; log_unauth("kadm5_mod_strings", prime_arg, &client_name, &service_name, rqstp); } else { ret.code = kadm5_set_string((void *)handle, arg->princ, arg->key, arg->value); if (ret.code != 0) errmsg = krb5_get_error_message(handle->context, ret.code); log_done("kadm5_mod_strings", prime_arg, errmsg, &client_name, &service_name, rqstp); if (errmsg != NULL) krb5_free_error_message(handle->context, errmsg); } free(prime_arg); gss_release_buffer(&minor_stat, &client_name); gss_release_buffer(&minor_stat, &service_name); exit_func: free_server_handle(handle); return &ret; } Commit Message: Fix leaks in kadmin server stubs [CVE-2015-8631] In each kadmind server stub, initialize the client_name and server_name variables, and release them in the cleanup handler. Many of the stubs will otherwise leak the client and server name if krb5_unparse_name() fails. Also make sure to free the prime_arg variables in rename_principal_2_svc(), or we can leak the first one if unparsing the second one fails. Discovered by Simo Sorce. CVE-2015-8631: In all versions of MIT krb5, an authenticated attacker can cause kadmind to leak memory by supplying a null principal name in a request which uses one. Repeating these requests will eventually cause kadmind to exhaust all available memory. CVSSv2 Vector: AV:N/AC:L/Au:S/C:N/I:N/A:C/E:POC/RL:OF/RC:C ticket: 8343 (new) target_version: 1.14-next target_version: 1.13-next tags: pullup CWE ID: CWE-119
set_string_2_svc(sstring_arg *arg, struct svc_req *rqstp) { static generic_ret ret; char *prime_arg; gss_buffer_desc client_name = GSS_C_EMPTY_BUFFER; gss_buffer_desc service_name = GSS_C_EMPTY_BUFFER; OM_uint32 minor_stat; kadm5_server_handle_t handle; const char *errmsg = NULL; xdr_free(xdr_generic_ret, &ret); if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle))) goto exit_func; if ((ret.code = check_handle((void *)handle))) goto exit_func; ret.api_version = handle->api_version; if (setup_gss_names(rqstp, &client_name, &service_name) < 0) { ret.code = KADM5_FAILURE; goto exit_func; } if (krb5_unparse_name(handle->context, arg->princ, &prime_arg)) { ret.code = KADM5_BAD_PRINCIPAL; goto exit_func; } if (CHANGEPW_SERVICE(rqstp) || !kadm5int_acl_check(handle->context, rqst2name(rqstp), ACL_MODIFY, arg->princ, NULL)) { ret.code = KADM5_AUTH_MODIFY; log_unauth("kadm5_mod_strings", prime_arg, &client_name, &service_name, rqstp); } else { ret.code = kadm5_set_string((void *)handle, arg->princ, arg->key, arg->value); if (ret.code != 0) errmsg = krb5_get_error_message(handle->context, ret.code); log_done("kadm5_mod_strings", prime_arg, errmsg, &client_name, &service_name, rqstp); if (errmsg != NULL) krb5_free_error_message(handle->context, errmsg); } free(prime_arg); exit_func: gss_release_buffer(&minor_stat, &client_name); gss_release_buffer(&minor_stat, &service_name); free_server_handle(handle); return &ret; }
167,524
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: MojoResult UnwrapSharedMemoryHandle(ScopedSharedBufferHandle handle, base::SharedMemoryHandle* memory_handle, size_t* size, bool* read_only) { if (!handle.is_valid()) return MOJO_RESULT_INVALID_ARGUMENT; MojoPlatformHandle platform_handle; platform_handle.struct_size = sizeof(MojoPlatformHandle); MojoPlatformSharedBufferHandleFlags flags; size_t num_bytes; MojoSharedBufferGuid mojo_guid; MojoResult result = MojoUnwrapPlatformSharedBufferHandle( handle.release().value(), &platform_handle, &num_bytes, &mojo_guid, &flags); if (result != MOJO_RESULT_OK) return result; if (size) *size = num_bytes; if (read_only) *read_only = flags & MOJO_PLATFORM_SHARED_BUFFER_HANDLE_FLAG_READ_ONLY; base::UnguessableToken guid = base::UnguessableToken::Deserialize(mojo_guid.high, mojo_guid.low); #if defined(OS_MACOSX) && !defined(OS_IOS) DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_MACH_PORT); *memory_handle = base::SharedMemoryHandle( static_cast<mach_port_t>(platform_handle.value), num_bytes, guid); #elif defined(OS_FUCHSIA) DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_FUCHSIA_HANDLE); *memory_handle = base::SharedMemoryHandle( static_cast<zx_handle_t>(platform_handle.value), num_bytes, guid); #elif defined(OS_POSIX) DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_FILE_DESCRIPTOR); *memory_handle = base::SharedMemoryHandle( base::FileDescriptor(static_cast<int>(platform_handle.value), false), num_bytes, guid); #elif defined(OS_WIN) DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_WINDOWS_HANDLE); *memory_handle = base::SharedMemoryHandle( reinterpret_cast<HANDLE>(platform_handle.value), num_bytes, guid); #endif return MOJO_RESULT_OK; } Commit Message: Correct mojo::WrapSharedMemoryHandle usage Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which were assuming that the call actually has any control over the memory protection applied to a handle when mapped. Where fixing usage is infeasible for this CL, TODOs are added to annotate follow-up work. Also updates the API and documentation to (hopefully) improve clarity and avoid similar mistakes from being made in the future. BUG=792900 Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477 Reviewed-on: https://chromium-review.googlesource.com/818282 Reviewed-by: Wei Li <[email protected]> Reviewed-by: Lei Zhang <[email protected]> Reviewed-by: John Abd-El-Malek <[email protected]> Reviewed-by: Daniel Cheng <[email protected]> Reviewed-by: Sadrul Chowdhury <[email protected]> Reviewed-by: Yuzhu Shen <[email protected]> Reviewed-by: Robert Sesek <[email protected]> Commit-Queue: Ken Rockot <[email protected]> Cr-Commit-Position: refs/heads/master@{#530268} CWE ID: CWE-787
MojoResult UnwrapSharedMemoryHandle( ScopedSharedBufferHandle handle, base::SharedMemoryHandle* memory_handle, size_t* size, UnwrappedSharedMemoryHandleProtection* protection) { if (!handle.is_valid()) return MOJO_RESULT_INVALID_ARGUMENT; MojoPlatformHandle platform_handle; platform_handle.struct_size = sizeof(MojoPlatformHandle); MojoPlatformSharedBufferHandleFlags flags; size_t num_bytes; MojoSharedBufferGuid mojo_guid; MojoResult result = MojoUnwrapPlatformSharedBufferHandle( handle.release().value(), &platform_handle, &num_bytes, &mojo_guid, &flags); if (result != MOJO_RESULT_OK) return result; if (size) *size = num_bytes; if (protection) { *protection = flags & MOJO_PLATFORM_SHARED_BUFFER_HANDLE_FLAG_HANDLE_IS_READ_ONLY ? UnwrappedSharedMemoryHandleProtection::kReadOnly : UnwrappedSharedMemoryHandleProtection::kReadWrite; } base::UnguessableToken guid = base::UnguessableToken::Deserialize(mojo_guid.high, mojo_guid.low); #if defined(OS_MACOSX) && !defined(OS_IOS) DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_MACH_PORT); *memory_handle = base::SharedMemoryHandle( static_cast<mach_port_t>(platform_handle.value), num_bytes, guid); #elif defined(OS_FUCHSIA) DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_FUCHSIA_HANDLE); *memory_handle = base::SharedMemoryHandle( static_cast<zx_handle_t>(platform_handle.value), num_bytes, guid); #elif defined(OS_POSIX) DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_FILE_DESCRIPTOR); *memory_handle = base::SharedMemoryHandle( base::FileDescriptor(static_cast<int>(platform_handle.value), false), num_bytes, guid); #elif defined(OS_WIN) DCHECK_EQ(platform_handle.type, MOJO_PLATFORM_HANDLE_TYPE_WINDOWS_HANDLE); *memory_handle = base::SharedMemoryHandle( reinterpret_cast<HANDLE>(platform_handle.value), num_bytes, guid); #endif return MOJO_RESULT_OK; }
172,884
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: decrypt_response(struct sc_card *card, unsigned char *in, unsigned char *out, size_t * out_len) { size_t in_len; size_t i; unsigned char iv[16] = { 0 }; unsigned char plaintext[4096] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* no cipher */ if (in[0] == 0x99) return 0; /* parse cipher length */ if (0x01 == in[2] && 0x82 != in[1]) { in_len = in[1]; i = 3; } else if (0x01 == in[3] && 0x81 == in[1]) { in_len = in[2]; i = 4; } else if (0x01 == in[4] && 0x82 == in[1]) { in_len = in[2] * 0x100; in_len += in[3]; i = 5; } else { return -1; } /* decrypt */ if (KEY_TYPE_AES == exdata->smtype) aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], in_len - 1, plaintext); else des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], in_len - 1, plaintext); /* unpadding */ while (0x80 != plaintext[in_len - 2] && (in_len - 2 > 0)) in_len--; if (2 == in_len) return -1; memcpy(out, plaintext, in_len - 2); *out_len = in_len - 2; return 0; } Commit Message: fixed out of bounds reads Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting and suggesting security fixes. CWE ID: CWE-125
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len) { size_t cipher_len; size_t i; unsigned char iv[16] = { 0 }; unsigned char plaintext[4096] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* no cipher */ if (in[0] == 0x99) return 0; /* parse cipher length */ if (0x01 == in[2] && 0x82 != in[1]) { cipher_len = in[1]; i = 3; } else if (0x01 == in[3] && 0x81 == in[1]) { cipher_len = in[2]; i = 4; } else if (0x01 == in[4] && 0x82 == in[1]) { cipher_len = in[2] * 0x100; cipher_len += in[3]; i = 5; } else { return -1; } if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext) return -1; /* decrypt */ if (KEY_TYPE_AES == exdata->smtype) aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); else des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); /* unpadding */ while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0)) cipher_len--; if (2 == cipher_len) return -1; memcpy(out, plaintext, cipher_len - 2); *out_len = cipher_len - 2; return 0; }
169,054
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op, ExceptionInfo *exception) { #define ComplexImageTag "Complex/Image" CacheView *Ai_view, *Ar_view, *Bi_view, *Br_view, *Ci_view, *Cr_view; const char *artifact; const Image *Ai_image, *Ar_image, *Bi_image, *Br_image; double snr; Image *Ci_image, *complex_images, *Cr_image, *image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (images->next == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",images->filename); return((Image *) NULL); } image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { image=DestroyImageList(image); return(image); } image->depth=32UL; complex_images=NewImageList(); AppendImageToList(&complex_images,image); image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) { complex_images=DestroyImageList(complex_images); return(complex_images); } AppendImageToList(&complex_images,image); /* Apply complex mathematics to image pixels. */ artifact=GetImageArtifact(image,"complex:snr"); snr=0.0; if (artifact != (const char *) NULL) snr=StringToDouble(artifact,(char **) NULL); Ar_image=images; Ai_image=images->next; Br_image=images; Bi_image=images->next; if ((images->next->next != (Image *) NULL) && (images->next->next->next != (Image *) NULL)) { Br_image=images->next->next; Bi_image=images->next->next->next; } Cr_image=complex_images; Ci_image=complex_images->next; Ar_view=AcquireVirtualCacheView(Ar_image,exception); Ai_view=AcquireVirtualCacheView(Ai_image,exception); Br_view=AcquireVirtualCacheView(Br_image,exception); Bi_view=AcquireVirtualCacheView(Bi_image,exception); Cr_view=AcquireAuthenticCacheView(Cr_image,exception); Ci_view=AcquireAuthenticCacheView(Ci_image,exception); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(images,complex_images,images->rows,1L) #endif for (y=0; y < (ssize_t) images->rows; y++) { register const PixelPacket *magick_restrict Ai, *magick_restrict Ar, *magick_restrict Bi, *magick_restrict Br; register PixelPacket *magick_restrict Ci, *magick_restrict Cr; register ssize_t x; if (status == MagickFalse) continue; Ar=GetCacheViewVirtualPixels(Ar_view,0,y, MagickMax(Ar_image->columns,Cr_image->columns),1,exception); Ai=GetCacheViewVirtualPixels(Ai_view,0,y, MagickMax(Ai_image->columns,Ci_image->columns),1,exception); Br=GetCacheViewVirtualPixels(Br_view,0,y, MagickMax(Br_image->columns,Cr_image->columns),1,exception); Bi=GetCacheViewVirtualPixels(Bi_view,0,y, MagickMax(Bi_image->columns,Ci_image->columns),1,exception); Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception); Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception); if ((Ar == (const PixelPacket *) NULL) || (Ai == (const PixelPacket *) NULL) || (Br == (const PixelPacket *) NULL) || (Bi == (const PixelPacket *) NULL) || (Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) 
images->columns; x++) { switch (op) { case AddComplexOperator: { Cr->red=Ar->red+Br->red; Ci->red=Ai->red+Bi->red; Cr->green=Ar->green+Br->green; Ci->green=Ai->green+Bi->green; Cr->blue=Ar->blue+Br->blue; Ci->blue=Ai->blue+Bi->blue; if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity+Br->opacity; Ci->opacity=Ai->opacity+Bi->opacity; } break; } case ConjugateComplexOperator: default: { Cr->red=Ar->red; Ci->red=(-Bi->red); Cr->green=Ar->green; Ci->green=(-Bi->green); Cr->blue=Ar->blue; Ci->blue=(-Bi->blue); if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity; Ci->opacity=(-Bi->opacity); } break; } case DivideComplexOperator: { double gamma; gamma=PerceptibleReciprocal(Br->red*Br->red+Bi->red*Bi->red+snr); Cr->red=gamma*(Ar->red*Br->red+Ai->red*Bi->red); Ci->red=gamma*(Ai->red*Br->red-Ar->red*Bi->red); gamma=PerceptibleReciprocal(Br->green*Br->green+Bi->green*Bi->green+ snr); Cr->green=gamma*(Ar->green*Br->green+Ai->green*Bi->green); Ci->green=gamma*(Ai->green*Br->green-Ar->green*Bi->green); gamma=PerceptibleReciprocal(Br->blue*Br->blue+Bi->blue*Bi->blue+snr); Cr->blue=gamma*(Ar->blue*Br->blue+Ai->blue*Bi->blue); Ci->blue=gamma*(Ai->blue*Br->blue-Ar->blue*Bi->blue); if (images->matte != MagickFalse) { gamma=PerceptibleReciprocal(Br->opacity*Br->opacity+Bi->opacity* Bi->opacity+snr); Cr->opacity=gamma*(Ar->opacity*Br->opacity+Ai->opacity* Bi->opacity); Ci->opacity=gamma*(Ai->opacity*Br->opacity-Ar->opacity* Bi->opacity); } break; } case MagnitudePhaseComplexOperator: { Cr->red=sqrt(Ar->red*Ar->red+Ai->red*Ai->red); Ci->red=atan2(Ai->red,Ar->red)/(2.0*MagickPI)+0.5; Cr->green=sqrt(Ar->green*Ar->green+Ai->green*Ai->green); Ci->green=atan2(Ai->green,Ar->green)/(2.0*MagickPI)+0.5; Cr->blue=sqrt(Ar->blue*Ar->blue+Ai->blue*Ai->blue); Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5; if (images->matte != MagickFalse) { Cr->opacity=sqrt(Ar->opacity*Ar->opacity+Ai->opacity*Ai->opacity); Ci->opacity=atan2(Ai->opacity,Ar->opacity)/(2.0*MagickPI)+0.5; } break; } case MultiplyComplexOperator: { Cr->red=QuantumScale*(Ar->red*Br->red-Ai->red*Bi->red); Ci->red=QuantumScale*(Ai->red*Br->red+Ar->red*Bi->red); Cr->green=QuantumScale*(Ar->green*Br->green-Ai->green*Bi->green); Ci->green=QuantumScale*(Ai->green*Br->green+Ar->green*Bi->green); Cr->blue=QuantumScale*(Ar->blue*Br->blue-Ai->blue*Bi->blue); Ci->blue=QuantumScale*(Ai->blue*Br->blue+Ar->blue*Bi->blue); if (images->matte != MagickFalse) { Cr->opacity=QuantumScale*(Ar->opacity*Br->opacity-Ai->opacity* Bi->opacity); Ci->opacity=QuantumScale*(Ai->opacity*Br->opacity+Ar->opacity* Bi->opacity); } break; } case RealImaginaryComplexOperator: { Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5)); Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5)); Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5)); Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5)); Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5)); Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5)); if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5)); Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5)); } break; } case SubtractComplexOperator: { Cr->red=Ar->red-Br->red; Ci->red=Ai->red-Bi->red; Cr->green=Ar->green-Br->green; Ci->green=Ai->green-Bi->green; Cr->blue=Ar->blue-Br->blue; Ci->blue=Ai->blue-Bi->blue; if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity-Br->opacity; Ci->opacity=Ai->opacity-Bi->opacity; } break; } } Ar++; Ai++; Br++; Bi++; Cr++; Ci++; } if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse) 
status=MagickFalse; if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows); if (proceed == MagickFalse) status=MagickFalse; } } Cr_view=DestroyCacheView(Cr_view); Ci_view=DestroyCacheView(Ci_view); Br_view=DestroyCacheView(Br_view); Bi_view=DestroyCacheView(Bi_view); Ar_view=DestroyCacheView(Ar_view); Ai_view=DestroyCacheView(Ai_view); if (status == MagickFalse) complex_images=DestroyImageList(complex_images); return(complex_images); } Commit Message: https://github.com/ImageMagick/ImageMagick/issues/1595 CWE ID: CWE-119
MagickExport Image *ComplexImages(const Image *images,const ComplexOperator op, ExceptionInfo *exception) { #define ComplexImageTag "Complex/Image" CacheView *Ai_view, *Ar_view, *Bi_view, *Br_view, *Ci_view, *Cr_view; const char *artifact; const Image *Ai_image, *Ar_image, *Bi_image, *Br_image; double snr; Image *Ci_image, *complex_images, *Cr_image, *image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (images->next == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",images->filename); return((Image *) NULL); } image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { image=DestroyImageList(image); return(image); } image->depth=32UL; complex_images=NewImageList(); AppendImageToList(&complex_images,image); image=CloneImage(images,0,0,MagickTrue,exception); if (image == (Image *) NULL) { complex_images=DestroyImageList(complex_images); return(complex_images); } AppendImageToList(&complex_images,image); /* Apply complex mathematics to image pixels. */ artifact=GetImageArtifact(image,"complex:snr"); snr=0.0; if (artifact != (const char *) NULL) snr=StringToDouble(artifact,(char **) NULL); Ar_image=images; Ai_image=images->next; Br_image=images; Bi_image=images->next; if ((images->next->next != (Image *) NULL) && (images->next->next->next != (Image *) NULL)) { Br_image=images->next->next; Bi_image=images->next->next->next; } Cr_image=complex_images; Ci_image=complex_images->next; Ar_view=AcquireVirtualCacheView(Ar_image,exception); Ai_view=AcquireVirtualCacheView(Ai_image,exception); Br_view=AcquireVirtualCacheView(Br_image,exception); Bi_view=AcquireVirtualCacheView(Bi_image,exception); Cr_view=AcquireAuthenticCacheView(Cr_image,exception); Ci_view=AcquireAuthenticCacheView(Ci_image,exception); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(Cr_image,complex_images,Cr_image->rows,1L) #endif for (y=0; y < (ssize_t) Cr_image->rows; y++) { register const PixelPacket *magick_restrict Ai, *magick_restrict Ar, *magick_restrict Bi, *magick_restrict Br; register PixelPacket *magick_restrict Ci, *magick_restrict Cr; register ssize_t x; if (status == MagickFalse) continue; Ar=GetCacheViewVirtualPixels(Ar_view,0,y,Cr_image->columns,1,exception); Ai=GetCacheViewVirtualPixels(Ai_view,0,y,Cr_image->columns,1,exception); Br=GetCacheViewVirtualPixels(Br_view,0,y,Cr_image->columns,1,exception); Bi=GetCacheViewVirtualPixels(Bi_view,0,y,Cr_image->columns,1,exception); Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,Cr_image->columns,1,exception); Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,Ci_image->columns,1,exception); if ((Ar == (const PixelPacket *) NULL) || (Ai == (const PixelPacket *) NULL) || (Br == (const PixelPacket *) NULL) || (Bi == (const PixelPacket *) NULL) || (Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) Cr_image->columns; x++) { switch (op) { case AddComplexOperator: { Cr->red=Ar->red+Br->red; Ci->red=Ai->red+Bi->red; 
Cr->green=Ar->green+Br->green; Ci->green=Ai->green+Bi->green; Cr->blue=Ar->blue+Br->blue; Ci->blue=Ai->blue+Bi->blue; if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity+Br->opacity; Ci->opacity=Ai->opacity+Bi->opacity; } break; } case ConjugateComplexOperator: default: { Cr->red=Ar->red; Ci->red=(-Bi->red); Cr->green=Ar->green; Ci->green=(-Bi->green); Cr->blue=Ar->blue; Ci->blue=(-Bi->blue); if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity; Ci->opacity=(-Bi->opacity); } break; } case DivideComplexOperator: { double gamma; gamma=PerceptibleReciprocal(Br->red*Br->red+Bi->red*Bi->red+snr); Cr->red=gamma*((double) Ar->red*Br->red+(double) Ai->red*Bi->red); Ci->red=gamma*((double) Ai->red*Br->red-(double) Ar->red*Bi->red); gamma=PerceptibleReciprocal((double) Br->green*Br->green+(double) Bi->green*Bi->green+snr); Cr->green=gamma*((double) Ar->green*Br->green+(double) Ai->green*Bi->green); Ci->green=gamma*((double) Ai->green*Br->green-(double) Ar->green*Bi->green); gamma=PerceptibleReciprocal((double) Br->blue*Br->blue+(double) Bi->blue*Bi->blue+snr); Cr->blue=gamma*((double) Ar->blue*Br->blue+(double) Ai->blue*Bi->blue); Ci->blue=gamma*((double) Ai->blue*Br->blue-(double) Ar->blue*Bi->blue); if (images->matte != MagickFalse) { gamma=PerceptibleReciprocal((double) Br->opacity*Br->opacity+ (double) Bi->opacity*Bi->opacity+snr); Cr->opacity=gamma*((double) Ar->opacity*Br->opacity+(double) Ai->opacity*Bi->opacity); Ci->opacity=gamma*((double) Ai->opacity*Br->opacity-(double) Ar->opacity*Bi->opacity); } break; } case MagnitudePhaseComplexOperator: { Cr->red=sqrt((double) Ar->red*Ar->red+(double) Ai->red*Ai->red); Ci->red=atan2((double) Ai->red,(double) Ar->red)/(2.0*MagickPI)+0.5; Cr->green=sqrt((double) Ar->green*Ar->green+(double) Ai->green*Ai->green); Ci->green=atan2((double) Ai->green,(double) Ar->green)/ (2.0*MagickPI)+0.5; Cr->blue=sqrt((double) Ar->blue*Ar->blue+(double) Ai->blue*Ai->blue); Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5; if (images->matte != MagickFalse) { Cr->opacity=sqrt((double) Ar->opacity*Ar->opacity+(double) Ai->opacity*Ai->opacity); Ci->opacity=atan2((double) Ai->opacity,(double) Ar->opacity)/ (2.0*MagickPI)+0.5; } break; } case MultiplyComplexOperator: { Cr->red=QuantumScale*((double) Ar->red*Br->red-(double) Ai->red*Bi->red); Ci->red=QuantumScale*((double) Ai->red*Br->red+(double) Ar->red*Bi->red); Cr->green=QuantumScale*((double) Ar->green*Br->green-(double) Ai->green*Bi->green); Ci->green=QuantumScale*((double) Ai->green*Br->green+(double) Ar->green*Bi->green); Cr->blue=QuantumScale*((double) Ar->blue*Br->blue-(double) Ai->blue*Bi->blue); Ci->blue=QuantumScale*((double) Ai->blue*Br->blue+(double) Ar->blue*Bi->blue); if (images->matte != MagickFalse) { Cr->opacity=QuantumScale*((double) Ar->opacity*Br->opacity- (double) Ai->opacity*Bi->opacity); Ci->opacity=QuantumScale*((double) Ai->opacity*Br->opacity+ (double) Ar->opacity*Bi->opacity); } break; } case RealImaginaryComplexOperator: { Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5)); Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5)); Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5)); Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5)); Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5)); Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5)); if (images->matte != MagickFalse) { Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5)); Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5)); } break; } case SubtractComplexOperator: { Cr->red=Ar->red-Br->red; Ci->red=Ai->red-Bi->red; 
Cr->green=Ar->green-Br->green; Ci->green=Ai->green-Bi->green; Cr->blue=Ar->blue-Br->blue; Ci->blue=Ai->blue-Bi->blue; if (Cr_image->matte != MagickFalse) { Cr->opacity=Ar->opacity-Br->opacity; Ci->opacity=Ai->opacity-Bi->opacity; } break; } } Ar++; Ai++; Br++; Bi++; Cr++; Ci++; } if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse) status=MagickFalse; if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,ComplexImageTag,progress,images->rows); if (proceed == MagickFalse) status=MagickFalse; } } Cr_view=DestroyCacheView(Cr_view); Ci_view=DestroyCacheView(Ci_view); Br_view=DestroyCacheView(Br_view); Bi_view=DestroyCacheView(Bi_view); Ar_view=DestroyCacheView(Ar_view); Ai_view=DestroyCacheView(Ai_view); if (status == MagickFalse) complex_images=DestroyImageList(complex_images); return(complex_images); }
169,594
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool asn1_write_BOOLEAN_context(struct asn1_data *data, bool v, int context) { asn1_push_tag(data, ASN1_CONTEXT_SIMPLE(context)); asn1_write_uint8(data, v ? 0xFF : 0); asn1_pop_tag(data); return !data->has_error; } Commit Message: CWE ID: CWE-399
bool asn1_write_BOOLEAN_context(struct asn1_data *data, bool v, int context) { if (!asn1_push_tag(data, ASN1_CONTEXT_SIMPLE(context))) return false; if (!asn1_write_uint8(data, v ? 0xFF : 0)) return false; return asn1_pop_tag(data); }
164,586
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void AppCacheUpdateJob::OnManifestDataWriteComplete(int result) { if (result > 0) { AppCacheEntry entry(AppCacheEntry::MANIFEST, manifest_response_writer_->response_id(), manifest_response_writer_->amount_written()); if (!inprogress_cache_->AddOrModifyEntry(manifest_url_, entry)) duplicate_response_ids_.push_back(entry.response_id()); StoreGroupAndCache(); } else { HandleCacheFailure( blink::mojom::AppCacheErrorDetails( "Failed to write the manifest data to storage", blink::mojom::AppCacheErrorReason::APPCACHE_UNKNOWN_ERROR, GURL(), 0, false /*is_cross_origin*/), DISKCACHE_ERROR, GURL()); } } Commit Message: Reland "AppCache: Add padding to cross-origin responses." This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7 Initialized CacheRecord::padding_size to 0. Original change's description: > AppCache: Add padding to cross-origin responses. > > Bug: 918293 > Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c > Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059 > Commit-Queue: Staphany Park <[email protected]> > Reviewed-by: Victor Costan <[email protected]> > Reviewed-by: Marijn Kruisselbrink <[email protected]> > Cr-Commit-Position: refs/heads/master@{#644624} Bug: 918293 Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906 Reviewed-by: Victor Costan <[email protected]> Commit-Queue: Staphany Park <[email protected]> Cr-Commit-Position: refs/heads/master@{#644719} CWE ID: CWE-200
void AppCacheUpdateJob::OnManifestDataWriteComplete(int result) { if (result > 0) { // The manifest determines the cache's origin, so the manifest entry is // always same-origin, and thus does not require padding. AppCacheEntry entry(AppCacheEntry::MANIFEST, manifest_response_writer_->response_id(), manifest_response_writer_->amount_written(), /*padding_size=*/0); if (!inprogress_cache_->AddOrModifyEntry(manifest_url_, entry)) duplicate_response_ids_.push_back(entry.response_id()); StoreGroupAndCache(); } else { HandleCacheFailure( blink::mojom::AppCacheErrorDetails( "Failed to write the manifest data to storage", blink::mojom::AppCacheErrorReason::APPCACHE_UNKNOWN_ERROR, GURL(), 0, false /*is_cross_origin*/), DISKCACHE_ERROR, GURL()); } }
172,997
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) { unsigned char present = 0; struct page *page; /* * When tmpfs swaps out a page from a file, any process mapping that * file will not get a swp_entry_t in its pte, but rather it is like * any other file mapping (ie. marked !present and faulted in with * tmpfs's .fault). So swapped out tmpfs mappings are tested here. */ #ifdef CONFIG_SWAP if (shmem_mapping(mapping)) { page = find_get_entry(mapping, pgoff); /* * shmem/tmpfs may return swap: account for swapcache * page too. */ if (xa_is_value(page)) { swp_entry_t swp = radix_to_swp_entry(page); page = find_get_page(swap_address_space(swp), swp_offset(swp)); } } else page = find_get_page(mapping, pgoff); #else page = find_get_page(mapping, pgoff); #endif if (page) { present = PageUptodate(page); put_page(page); } return present; } Commit Message: Change mincore() to count "mapped" pages rather than "cached" pages The semantics of what "in core" means for the mincore() system call are somewhat unclear, but Linux has always (since 2.3.52, which is when mincore() was initially done) treated it as "page is available in page cache" rather than "page is mapped in the mapping". The problem with that traditional semantic is that it exposes a lot of system cache state that it really probably shouldn't, and that users shouldn't really even care about. So let's try to avoid that information leak by simply changing the semantics to be that mincore() counts actual mapped pages, not pages that might be cheaply mapped if they were faulted (note the "might be" part of the old semantics: being in the cache doesn't actually guarantee that you can access them without IO anyway, since things like network filesystems may have to revalidate the cache before use). In many ways the old semantics were somewhat insane even aside from the information leak issue. From the very beginning (and that beginning is a long time ago: 2.3.52 was released in March 2000, I think), the code had a comment saying Later we can get more picky about what "in core" means precisely. and this is that "later". Admittedly it is much later than is really comfortable. NOTE! This is a real semantic change, and it is for example known to change the output of "fincore", since that program literally does a mmmap without populating it, and then doing "mincore()" on that mapping that doesn't actually have any pages in it. I'm hoping that nobody actually has any workflow that cares, and the info leak is real. We may have to do something different if it turns out that people have valid reasons to want the old semantics, and if we can limit the information leak sanely. Cc: Kevin Easton <[email protected]> Cc: Jiri Kosina <[email protected]> Cc: Masatake YAMATO <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Greg KH <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Michal Hocko <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-200
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
169,746
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) { int error; int len; char *data; const char *name = gfs2_acl_name(type); if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode))) return -E2BIG; if (type == ACL_TYPE_ACCESS) { umode_t mode = inode->i_mode; error = posix_acl_equiv_mode(acl, &mode); if (error < 0) return error; if (error == 0) acl = NULL; if (mode != inode->i_mode) { inode->i_mode = mode; mark_inode_dirty(inode); } } if (acl) { len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0); if (len == 0) return 0; data = kmalloc(len, GFP_NOFS); if (data == NULL) return -ENOMEM; error = posix_acl_to_xattr(&init_user_ns, acl, data, len); if (error < 0) goto out; } else { data = NULL; len = 0; } error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS); if (error) goto out; set_cached_acl(inode, type, acl); out: kfree(data); return error; } Commit Message: posix_acl: Clear SGID bit when setting file permissions When file permissions are modified via chmod(2) and the user is not in the owning group or capable of CAP_FSETID, the setgid bit is cleared in inode_change_ok(). Setting a POSIX ACL via setxattr(2) sets the file permissions as well as the new ACL, but doesn't clear the setgid bit in a similar way; this allows to bypass the check in chmod(2). Fix that. References: CVE-2016-7097 Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Jeff Layton <[email protected]> Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Andreas Gruenbacher <[email protected]> CWE ID: CWE-285
int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) { int error; int len; char *data; const char *name = gfs2_acl_name(type); if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode))) return -E2BIG; if (type == ACL_TYPE_ACCESS) { umode_t mode = inode->i_mode; error = posix_acl_update_mode(inode, &inode->i_mode, &acl); if (error) return error; if (mode != inode->i_mode) mark_inode_dirty(inode); } if (acl) { len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0); if (len == 0) return 0; data = kmalloc(len, GFP_NOFS); if (data == NULL) return -ENOMEM; error = posix_acl_to_xattr(&init_user_ns, acl, data, len); if (error < 0) goto out; } else { data = NULL; len = 0; } error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS); if (error) goto out; set_cached_acl(inode, type, acl); out: kfree(data); return error; }
166,972
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: recv_and_process_client_pkt(void /*int fd*/) { ssize_t size; len_and_sockaddr *to; struct sockaddr *from; msg_t msg; uint8_t query_status; l_fixedpt_t query_xmttime; to = get_sock_lsa(G_listen_fd); from = xzalloc(to->len); size = recv_from_to(G_listen_fd, &msg, sizeof(msg), MSG_DONTWAIT, from, &to->u.sa, to->len); if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE) { char *addr; if (size < 0) { if (errno == EAGAIN) goto bail; bb_perror_msg_and_die("recv"); } addr = xmalloc_sockaddr2dotted_noport(from); bb_error_msg("malformed packet received from %s: size %u", addr, (int)size); free(addr); goto bail; } query_status = msg.m_status; query_xmttime = msg.m_xmttime; msg.m_ppoll = G.poll_exp; msg.m_precision_exp = G_precision_exp; /* this time was obtained between poll() and recv() */ msg.m_rectime = d_to_lfp(G.cur_time); msg.m_xmttime = d_to_lfp(gettime1900d()); /* this instant */ if (G.peer_cnt == 0) { /* we have no peers: "stratum 1 server" mode. reftime = our own time */ G.reftime = G.cur_time; } msg.m_reftime = d_to_lfp(G.reftime); msg.m_orgtime = query_xmttime; msg.m_rootdelay = d_to_sfp(G.rootdelay); msg.m_rootdisp = d_to_sfp(G.rootdisp); msg.m_refid = G.refid; // (version > (3 << VERSION_SHIFT)) ? G.refid : G.refid3; /* We reply from the local address packet was sent to, * this makes to/from look swapped here: */ do_sendto(G_listen_fd, /*from:*/ &to->u.sa, /*to:*/ from, /*addrlen:*/ to->len, &msg, size); bail: free(to); free(from); } Commit Message: CWE ID: CWE-399
recv_and_process_client_pkt(void /*int fd*/) { ssize_t size; len_and_sockaddr *to; struct sockaddr *from; msg_t msg; uint8_t query_status; l_fixedpt_t query_xmttime; to = get_sock_lsa(G_listen_fd); from = xzalloc(to->len); size = recv_from_to(G_listen_fd, &msg, sizeof(msg), MSG_DONTWAIT, from, &to->u.sa, to->len); if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE) { char *addr; if (size < 0) { if (errno == EAGAIN) goto bail; bb_perror_msg_and_die("recv"); } addr = xmalloc_sockaddr2dotted_noport(from); bb_error_msg("malformed packet received from %s: size %u", addr, (int)size); free(addr); goto bail; } /* Respond only to client and symmetric active packets */ if ((msg.m_status & MODE_MASK) != MODE_CLIENT && (msg.m_status & MODE_MASK) != MODE_SYM_ACT ) { goto bail; } query_status = msg.m_status; query_xmttime = msg.m_xmttime; msg.m_ppoll = G.poll_exp; msg.m_precision_exp = G_precision_exp; /* this time was obtained between poll() and recv() */ msg.m_rectime = d_to_lfp(G.cur_time); msg.m_xmttime = d_to_lfp(gettime1900d()); /* this instant */ if (G.peer_cnt == 0) { /* we have no peers: "stratum 1 server" mode. reftime = our own time */ G.reftime = G.cur_time; } msg.m_reftime = d_to_lfp(G.reftime); msg.m_orgtime = query_xmttime; msg.m_rootdelay = d_to_sfp(G.rootdelay); msg.m_rootdisp = d_to_sfp(G.rootdisp); msg.m_refid = G.refid; // (version > (3 << VERSION_SHIFT)) ? G.refid : G.refid3; /* We reply from the local address packet was sent to, * this makes to/from look swapped here: */ do_sendto(G_listen_fd, /*from:*/ &to->u.sa, /*to:*/ from, /*addrlen:*/ to->len, &msg, size); bail: free(to); free(from); }
164,967
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static struct vm_area_struct *vma_to_resize(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long *p) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = find_vma(mm, addr); if (!vma || vma->vm_start > addr) goto Efault; if (is_vm_hugetlb_page(vma)) goto Einval; /* We can't remap across vm area boundaries */ if (old_len > vma->vm_end - addr) goto Efault; if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) { if (new_len > old_len) goto Efault; } if (vma->vm_flags & VM_LOCKED) { unsigned long locked, lock_limit; locked = mm->locked_vm << PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK); locked += new_len - old_len; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) goto Eagain; } if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) goto Enomem; if (vma->vm_flags & VM_ACCOUNT) { unsigned long charged = (new_len - old_len) >> PAGE_SHIFT; if (security_vm_enough_memory(charged)) goto Efault; *p = charged; } return vma; Efault: /* very odd choice for most of the cases, but... */ return ERR_PTR(-EFAULT); Einval: return ERR_PTR(-EINVAL); Enomem: return ERR_PTR(-ENOMEM); Eagain: return ERR_PTR(-EAGAIN); } Commit Message: mm: avoid wrapping vm_pgoff in mremap() The normal mmap paths all avoid creating a mapping where the pgoff inside the mapping could wrap around due to overflow. However, an expanding mremap() can take such a non-wrapping mapping and make it bigger and cause a wrapping condition. Noticed by Robert Swiecki when running a system call fuzzer, where it caused a BUG_ON() due to terminally confusing the vma_prio_tree code. A vma dumping patch by Hugh then pinpointed the crazy wrapped case. Reported-and-tested-by: Robert Swiecki <[email protected]> Acked-by: Hugh Dickins <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]> CWE ID: CWE-189
static struct vm_area_struct *vma_to_resize(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long *p) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = find_vma(mm, addr); if (!vma || vma->vm_start > addr) goto Efault; if (is_vm_hugetlb_page(vma)) goto Einval; /* We can't remap across vm area boundaries */ if (old_len > vma->vm_end - addr) goto Efault; /* Need to be careful about a growing mapping */ if (new_len > old_len) { unsigned long pgoff; if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) goto Efault; pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) goto Einval; } if (vma->vm_flags & VM_LOCKED) { unsigned long locked, lock_limit; locked = mm->locked_vm << PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK); locked += new_len - old_len; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) goto Eagain; } if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) goto Enomem; if (vma->vm_flags & VM_ACCOUNT) { unsigned long charged = (new_len - old_len) >> PAGE_SHIFT; if (security_vm_enough_memory(charged)) goto Efault; *p = charged; } return vma; Efault: /* very odd choice for most of the cases, but... */ return ERR_PTR(-EFAULT); Einval: return ERR_PTR(-EINVAL); Enomem: return ERR_PTR(-ENOMEM); Eagain: return ERR_PTR(-EAGAIN); }
165,859
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) { struct net_device *dev = skb->dev; int fhoff, nhoff, ret; struct frag_hdr *fhdr; struct frag_queue *fq; struct ipv6hdr *hdr; u8 prevhdr; /* Jumbo payload inhibits frag. header */ if (ipv6_hdr(skb)->payload_len == 0) { pr_debug("payload len = 0\n"); return -EINVAL; } if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) return -EINVAL; if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr))) return -ENOMEM; skb_set_transport_header(skb, fhoff); hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); return -ENOMEM; } spin_lock_bh(&fq->q.lock); if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) { ret = -EINVAL; goto out_unlock; } /* after queue has assumed skb ownership, only 0 or -EINPROGRESS * must be returned. */ ret = -EINPROGRESS; if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len && nf_ct_frag6_reasm(fq, skb, dev)) ret = 0; out_unlock: spin_unlock_bh(&fq->q.lock); inet_frag_put(&fq->q, &nf_frags); return ret; } Commit Message: netfilter: ipv6: nf_defrag: drop mangled skb on ream error Dmitry Vyukov reported GPF in network stack that Andrey traced down to negative nh offset in nf_ct_frag6_queue(). Problem is that all network headers before fragment header are pulled. Normal ipv6 reassembly will drop the skb when errors occur further down the line. netfilter doesn't do this, and instead passed the original fragment along. That was also fine back when netfilter ipv6 defrag worked with cloned fragments, as the original, pristine fragment was passed on. So we either have to undo the pull op, or discard such fragments. Since they're malformed after all (e.g. overlapping fragment) it seems preferrable to just drop them. Same for temporary errors -- it doesn't make sense to accept (and perhaps forward!) only some fragments of same datagram. Fixes: 029f7f3b8701cc7ac ("netfilter: ipv6: nf_defrag: avoid/free clone operations") Reported-by: Dmitry Vyukov <[email protected]> Debugged-by: Andrey Konovalov <[email protected]> Diagnosed-by: Eric Dumazet <Eric Dumazet <[email protected]> Signed-off-by: Florian Westphal <[email protected]> Acked-by: Eric Dumazet <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]> CWE ID: CWE-787
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) { struct net_device *dev = skb->dev; int fhoff, nhoff, ret; struct frag_hdr *fhdr; struct frag_queue *fq; struct ipv6hdr *hdr; u8 prevhdr; /* Jumbo payload inhibits frag. header */ if (ipv6_hdr(skb)->payload_len == 0) { pr_debug("payload len = 0\n"); return 0; } if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) return 0; if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr))) return -ENOMEM; skb_set_transport_header(skb, fhoff); hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); return -ENOMEM; } spin_lock_bh(&fq->q.lock); if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) { ret = -EINVAL; goto out_unlock; } /* after queue has assumed skb ownership, only 0 or -EINPROGRESS * must be returned. */ ret = -EINPROGRESS; if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len && nf_ct_frag6_reasm(fq, skb, dev)) ret = 0; out_unlock: spin_unlock_bh(&fq->q.lock); inet_frag_put(&fq->q, &nf_frags); return ret; }
166,850
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void FrameSelection::FocusedOrActiveStateChanged() { bool active_and_focused = FrameIsFocusedAndActive(); if (Element* element = GetDocument().FocusedElement()) element->FocusStateChanged(); GetDocument().UpdateStyleAndLayoutTree(); auto* view = GetDocument().GetLayoutView(); if (view) layout_selection_->InvalidatePaintForSelection(); if (active_and_focused) SetSelectionFromNone(); frame_caret_->SetCaretVisibility(active_and_focused ? CaretVisibility::kVisible : CaretVisibility::kHidden); frame_->GetEventHandler().CapsLockStateMayHaveChanged(); if (use_secure_keyboard_entry_when_active_) SetUseSecureKeyboardEntry(active_and_focused); } Commit Message: MacViews: Enable secure text input for password Textfields. In Cocoa the NSTextInputContext automatically enables secure text input when activated and it's in the secure text entry mode. RenderWidgetHostViewMac did the similar thing for ages following the WebKit example. views::Textfield needs to do the same thing in a fashion that's sycnrhonized with RenderWidgetHostViewMac, otherwise the race conditions are possible when the Textfield gets focus, activates the secure text input mode and the RWHVM loses focus immediately afterwards and disables the secure text input instead of leaving it in the enabled state. BUG=818133,677220 Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b Reviewed-on: https://chromium-review.googlesource.com/943064 Commit-Queue: Michail Pishchagin <[email protected]> Reviewed-by: Pavel Feldman <[email protected]> Reviewed-by: Avi Drissman <[email protected]> Reviewed-by: Peter Kasting <[email protected]> Cr-Commit-Position: refs/heads/master@{#542517} CWE ID:
void FrameSelection::FocusedOrActiveStateChanged() { bool active_and_focused = FrameIsFocusedAndActive(); if (Element* element = GetDocument().FocusedElement()) element->FocusStateChanged(); GetDocument().UpdateStyleAndLayoutTree(); auto* view = GetDocument().GetLayoutView(); if (view) layout_selection_->InvalidatePaintForSelection(); if (active_and_focused) SetSelectionFromNone(); frame_caret_->SetCaretVisibility(active_and_focused ? CaretVisibility::kVisible : CaretVisibility::kHidden); frame_->GetEventHandler().CapsLockStateMayHaveChanged(); }
171,854
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: xmlParsePEReference(xmlParserCtxtPtr ctxt) { const xmlChar *name; xmlEntityPtr entity = NULL; xmlParserInputPtr input; if (RAW != '%') return; NEXT; name = xmlParseName(ctxt); if (name == NULL) { xmlFatalErrMsg(ctxt, XML_ERR_NAME_REQUIRED, "xmlParsePEReference: no name\n"); return; } if (RAW != ';') { xmlFatalErr(ctxt, XML_ERR_ENTITYREF_SEMICOL_MISSING, NULL); return; } NEXT; /* * Increate the number of entity references parsed */ ctxt->nbentities++; /* * Request the entity from SAX */ if ((ctxt->sax != NULL) && (ctxt->sax->getParameterEntity != NULL)) entity = ctxt->sax->getParameterEntity(ctxt->userData, name); if (entity == NULL) { /* * [ WFC: Entity Declared ] * In a document without any DTD, a document with only an * internal DTD subset which contains no parameter entity * references, or a document with "standalone='yes'", ... * ... The declaration of a parameter entity must precede * any reference to it... */ if ((ctxt->standalone == 1) || ((ctxt->hasExternalSubset == 0) && (ctxt->hasPErefs == 0))) { xmlFatalErrMsgStr(ctxt, XML_ERR_UNDECLARED_ENTITY, "PEReference: %%%s; not found\n", name); } else { /* * [ VC: Entity Declared ] * In a document with an external subset or external * parameter entities with "standalone='no'", ... * ... The declaration of a parameter entity must * precede any reference to it... */ xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY, "PEReference: %%%s; not found\n", name, NULL); ctxt->valid = 0; } } else { /* * Internal checking in case the entity quest barfed */ if ((entity->etype != XML_INTERNAL_PARAMETER_ENTITY) && (entity->etype != XML_EXTERNAL_PARAMETER_ENTITY)) { xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY, "Internal: %%%s; is not a parameter entity\n", name, NULL); } else if (ctxt->input->free != deallocblankswrapper) { input = xmlNewBlanksWrapperInputStream(ctxt, entity); if (xmlPushInput(ctxt, input) < 0) return; } else { /* * TODO !!! * handle the extra spaces added before and after * c.f. http://www.w3.org/TR/REC-xml#as-PE */ input = xmlNewEntityInputStream(ctxt, entity); if (xmlPushInput(ctxt, input) < 0) return; if ((entity->etype == XML_EXTERNAL_PARAMETER_ENTITY) && (CMP5(CUR_PTR, '<', '?', 'x', 'm', 'l')) && (IS_BLANK_CH(NXT(5)))) { xmlParseTextDecl(ctxt); if (ctxt->errNo == XML_ERR_UNSUPPORTED_ENCODING) { /* * The XML REC instructs us to stop parsing * right here */ ctxt->instate = XML_PARSER_EOF; return; } } } } ctxt->hasPErefs = 1; } Commit Message: libxml: XML_PARSER_EOF checks from upstream BUG=229019 TBR=cpu Review URL: https://chromiumcodereview.appspot.com/14053009 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@196804 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-119
xmlParsePEReference(xmlParserCtxtPtr ctxt) { const xmlChar *name; xmlEntityPtr entity = NULL; xmlParserInputPtr input; if (RAW != '%') return; NEXT; name = xmlParseName(ctxt); if (name == NULL) { xmlFatalErrMsg(ctxt, XML_ERR_NAME_REQUIRED, "xmlParsePEReference: no name\n"); return; } if (RAW != ';') { xmlFatalErr(ctxt, XML_ERR_ENTITYREF_SEMICOL_MISSING, NULL); return; } NEXT; /* * Increate the number of entity references parsed */ ctxt->nbentities++; /* * Request the entity from SAX */ if ((ctxt->sax != NULL) && (ctxt->sax->getParameterEntity != NULL)) entity = ctxt->sax->getParameterEntity(ctxt->userData, name); if (ctxt->instate == XML_PARSER_EOF) return; if (entity == NULL) { /* * [ WFC: Entity Declared ] * In a document without any DTD, a document with only an * internal DTD subset which contains no parameter entity * references, or a document with "standalone='yes'", ... * ... The declaration of a parameter entity must precede * any reference to it... */ if ((ctxt->standalone == 1) || ((ctxt->hasExternalSubset == 0) && (ctxt->hasPErefs == 0))) { xmlFatalErrMsgStr(ctxt, XML_ERR_UNDECLARED_ENTITY, "PEReference: %%%s; not found\n", name); } else { /* * [ VC: Entity Declared ] * In a document with an external subset or external * parameter entities with "standalone='no'", ... * ... The declaration of a parameter entity must * precede any reference to it... */ xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY, "PEReference: %%%s; not found\n", name, NULL); ctxt->valid = 0; } } else { /* * Internal checking in case the entity quest barfed */ if ((entity->etype != XML_INTERNAL_PARAMETER_ENTITY) && (entity->etype != XML_EXTERNAL_PARAMETER_ENTITY)) { xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY, "Internal: %%%s; is not a parameter entity\n", name, NULL); } else if (ctxt->input->free != deallocblankswrapper) { input = xmlNewBlanksWrapperInputStream(ctxt, entity); if (xmlPushInput(ctxt, input) < 0) return; } else { /* * TODO !!! * handle the extra spaces added before and after * c.f. http://www.w3.org/TR/REC-xml#as-PE */ input = xmlNewEntityInputStream(ctxt, entity); if (xmlPushInput(ctxt, input) < 0) return; if ((entity->etype == XML_EXTERNAL_PARAMETER_ENTITY) && (CMP5(CUR_PTR, '<', '?', 'x', 'm', 'l')) && (IS_BLANK_CH(NXT(5)))) { xmlParseTextDecl(ctxt); if (ctxt->errNo == XML_ERR_UNSUPPORTED_ENCODING) { /* * The XML REC instructs us to stop parsing * right here */ ctxt->instate = XML_PARSER_EOF; return; } } } } ctxt->hasPErefs = 1; }
171,299
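The libxml2 fix above adds a single guard: once the SAX getParameterEntity callback has run, the parser may already have been moved into XML_PARSER_EOF, so the returned entity must not be used. A minimal, hypothetical reduction of that pattern in plain C follows; the enum, struct, and function names are illustrative, not libxml2's API.

    #include <stddef.h>

    /* Hypothetical reduction: a user-supplied callback may move the parser
     * into an EOF/aborted state, so its result must be discarded after
     * re-checking that state. Names are illustrative, not libxml2's. */
    enum parser_state { PARSER_RUNNING, PARSER_EOF };

    struct parser {
        enum parser_state instate;
        void *(*get_parameter_entity)(struct parser *p, const char *name);
    };

    static void *resolve_pe_reference(struct parser *p, const char *name)
    {
        void *entity = NULL;

        if (p->get_parameter_entity)
            entity = p->get_parameter_entity(p, name);

        /* The callback may have aborted parsing (cf. XML_PARSER_EOF above):
         * bail out before dereferencing or pushing the entity. */
        if (p->instate == PARSER_EOF)
            return NULL;

        return entity;
    }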
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool BitReaderCore::ReadBitsInternal(int num_bits, uint64_t* out) { DCHECK_GE(num_bits, 0); if (num_bits == 0) { *out = 0; return true; } if (num_bits > nbits_ && !Refill(num_bits)) { nbits_ = 0; reg_ = 0; return false; } bits_read_ += num_bits; if (num_bits == kRegWidthInBits) { *out = reg_; reg_ = 0; nbits_ = 0; return true; } *out = reg_ >> (kRegWidthInBits - num_bits); reg_ <<= num_bits; nbits_ -= num_bits; return true; } Commit Message: Cleanup media BitReader ReadBits() calls Initialize temporary values, check return values. Small tweaks to solution proposed by [email protected]. Bug: 929962 Change-Id: Iaa7da7534174882d040ec7e4c353ba5cd0da5735 Reviewed-on: https://chromium-review.googlesource.com/c/1481085 Commit-Queue: Chrome Cunningham <[email protected]> Reviewed-by: Dan Sanders <[email protected]> Cr-Commit-Position: refs/heads/master@{#634889} CWE ID: CWE-200
bool BitReaderCore::ReadBitsInternal(int num_bits, uint64_t* out) { DCHECK_GE(num_bits, 0); if (num_bits == 0) { *out = 0; return true; } if (num_bits > nbits_ && !Refill(num_bits)) { nbits_ = 0; reg_ = 0; *out = 0; return false; } bits_read_ += num_bits; if (num_bits == kRegWidthInBits) { *out = reg_; reg_ = 0; nbits_ = 0; return true; } *out = reg_ >> (kRegWidthInBits - num_bits); reg_ <<= num_bits; nbits_ -= num_bits; return true; }
173,017
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: bool SessionService::CreateTabsAndWindows( const std::vector<SessionCommand*>& data, std::map<int, SessionTab*>* tabs, std::map<int, SessionWindow*>* windows) { for (std::vector<SessionCommand*>::const_iterator i = data.begin(); i != data.end(); ++i) { const SessionCommand::id_type kCommandSetWindowBounds2 = 10; const SessionCommand* command = *i; switch (command->id()) { case kCommandSetTabWindow: { SessionID::id_type payload[2]; if (!command->GetPayload(payload, sizeof(payload))) return true; GetTab(payload[1], tabs)->window_id.set_id(payload[0]); break; } case kCommandSetWindowBounds2: { WindowBoundsPayload2 payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetWindow(payload.window_id, windows)->bounds.SetRect(payload.x, payload.y, payload.w, payload.h); GetWindow(payload.window_id, windows)->show_state = payload.is_maximized ? ui::SHOW_STATE_MAXIMIZED : ui::SHOW_STATE_NORMAL; break; } case kCommandSetWindowBounds3: { WindowBoundsPayload3 payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetWindow(payload.window_id, windows)->bounds.SetRect(payload.x, payload.y, payload.w, payload.h); ui::WindowShowState show_state = ui::SHOW_STATE_NORMAL; if (payload.show_state > ui::SHOW_STATE_DEFAULT && payload.show_state < ui::SHOW_STATE_END && payload.show_state != ui::SHOW_STATE_INACTIVE) { show_state = static_cast<ui::WindowShowState>(payload.show_state); } else { NOTREACHED(); } GetWindow(payload.window_id, windows)->show_state = show_state; break; } case kCommandSetTabIndexInWindow: { TabIndexInWindowPayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetTab(payload.id, tabs)->tab_visual_index = payload.index; break; } case kCommandTabClosed: case kCommandWindowClosed: { ClosedPayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; if (command->id() == kCommandTabClosed) { delete GetTab(payload.id, tabs); tabs->erase(payload.id); } else { delete GetWindow(payload.id, windows); windows->erase(payload.id); } break; } case kCommandTabNavigationPathPrunedFromBack: { TabNavigationPathPrunedFromBackPayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; SessionTab* tab = GetTab(payload.id, tabs); tab->navigations.erase( FindClosestNavigationWithIndex(&(tab->navigations), payload.index), tab->navigations.end()); break; } case kCommandTabNavigationPathPrunedFromFront: { TabNavigationPathPrunedFromFrontPayload payload; if (!command->GetPayload(&payload, sizeof(payload)) || payload.index <= 0) { return true; } SessionTab* tab = GetTab(payload.id, tabs); tab->current_navigation_index = std::max(-1, tab->current_navigation_index - payload.index); for (std::vector<TabNavigation>::iterator i = tab->navigations.begin(); i != tab->navigations.end();) { i->set_index(i->index() - payload.index); if (i->index() < 0) i = tab->navigations.erase(i); else ++i; } break; } case kCommandUpdateTabNavigation: { TabNavigation navigation; SessionID::id_type tab_id; if (!RestoreUpdateTabNavigationCommand(*command, &navigation, &tab_id)) return true; SessionTab* tab = GetTab(tab_id, tabs); std::vector<TabNavigation>::iterator i = FindClosestNavigationWithIndex(&(tab->navigations), navigation.index()); if (i != tab->navigations.end() && i->index() == navigation.index()) *i = navigation; else tab->navigations.insert(i, navigation); break; } case kCommandSetSelectedNavigationIndex: { SelectedNavigationIndexPayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; 
GetTab(payload.id, tabs)->current_navigation_index = payload.index; break; } case kCommandSetSelectedTabInIndex: { SelectedTabInIndexPayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetWindow(payload.id, windows)->selected_tab_index = payload.index; break; } case kCommandSetWindowType: { WindowTypePayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetWindow(payload.id, windows)->is_constrained = false; GetWindow(payload.id, windows)->type = BrowserTypeForWindowType( static_cast<WindowType>(payload.index)); break; } case kCommandSetPinnedState: { PinnedStatePayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetTab(payload.tab_id, tabs)->pinned = payload.pinned_state; break; } case kCommandSetExtensionAppID: { SessionID::id_type tab_id; std::string extension_app_id; if (!RestoreSetTabExtensionAppIDCommand( *command, &tab_id, &extension_app_id)) { return true; } GetTab(tab_id, tabs)->extension_app_id.swap(extension_app_id); break; } default: return true; } } return true; } Commit Message: Metrics for measuring how much overhead reading compressed content states adds. BUG=104293 TEST=NONE Review URL: https://chromiumcodereview.appspot.com/9426039 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@123733 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-20
bool SessionService::CreateTabsAndWindows( const std::vector<SessionCommand*>& data, std::map<int, SessionTab*>* tabs, std::map<int, SessionWindow*>* windows) { ResetContentStateReadingMetrics(); for (std::vector<SessionCommand*>::const_iterator i = data.begin(); i != data.end(); ++i) { const SessionCommand::id_type kCommandSetWindowBounds2 = 10; const SessionCommand* command = *i; switch (command->id()) { case kCommandSetTabWindow: { SessionID::id_type payload[2]; if (!command->GetPayload(payload, sizeof(payload))) return true; GetTab(payload[1], tabs)->window_id.set_id(payload[0]); break; } case kCommandSetWindowBounds2: { WindowBoundsPayload2 payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetWindow(payload.window_id, windows)->bounds.SetRect(payload.x, payload.y, payload.w, payload.h); GetWindow(payload.window_id, windows)->show_state = payload.is_maximized ? ui::SHOW_STATE_MAXIMIZED : ui::SHOW_STATE_NORMAL; break; } case kCommandSetWindowBounds3: { WindowBoundsPayload3 payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetWindow(payload.window_id, windows)->bounds.SetRect(payload.x, payload.y, payload.w, payload.h); ui::WindowShowState show_state = ui::SHOW_STATE_NORMAL; if (payload.show_state > ui::SHOW_STATE_DEFAULT && payload.show_state < ui::SHOW_STATE_END && payload.show_state != ui::SHOW_STATE_INACTIVE) { show_state = static_cast<ui::WindowShowState>(payload.show_state); } else { NOTREACHED(); } GetWindow(payload.window_id, windows)->show_state = show_state; break; } case kCommandSetTabIndexInWindow: { TabIndexInWindowPayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetTab(payload.id, tabs)->tab_visual_index = payload.index; break; } case kCommandTabClosed: case kCommandWindowClosed: { ClosedPayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; if (command->id() == kCommandTabClosed) { delete GetTab(payload.id, tabs); tabs->erase(payload.id); } else { delete GetWindow(payload.id, windows); windows->erase(payload.id); } break; } case kCommandTabNavigationPathPrunedFromBack: { TabNavigationPathPrunedFromBackPayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; SessionTab* tab = GetTab(payload.id, tabs); tab->navigations.erase( FindClosestNavigationWithIndex(&(tab->navigations), payload.index), tab->navigations.end()); break; } case kCommandTabNavigationPathPrunedFromFront: { TabNavigationPathPrunedFromFrontPayload payload; if (!command->GetPayload(&payload, sizeof(payload)) || payload.index <= 0) { return true; } SessionTab* tab = GetTab(payload.id, tabs); tab->current_navigation_index = std::max(-1, tab->current_navigation_index - payload.index); for (std::vector<TabNavigation>::iterator i = tab->navigations.begin(); i != tab->navigations.end();) { i->set_index(i->index() - payload.index); if (i->index() < 0) i = tab->navigations.erase(i); else ++i; } break; } case kCommandUpdateTabNavigation: { TabNavigation navigation; SessionID::id_type tab_id; if (!RestoreUpdateTabNavigationCommand(*command, &navigation, &tab_id)) return true; SessionTab* tab = GetTab(tab_id, tabs); std::vector<TabNavigation>::iterator i = FindClosestNavigationWithIndex(&(tab->navigations), navigation.index()); if (i != tab->navigations.end() && i->index() == navigation.index()) *i = navigation; else tab->navigations.insert(i, navigation); break; } case kCommandSetSelectedNavigationIndex: { SelectedNavigationIndexPayload payload; if (!command->GetPayload(&payload, 
sizeof(payload))) return true; GetTab(payload.id, tabs)->current_navigation_index = payload.index; break; } case kCommandSetSelectedTabInIndex: { SelectedTabInIndexPayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetWindow(payload.id, windows)->selected_tab_index = payload.index; break; } case kCommandSetWindowType: { WindowTypePayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetWindow(payload.id, windows)->is_constrained = false; GetWindow(payload.id, windows)->type = BrowserTypeForWindowType( static_cast<WindowType>(payload.index)); break; } case kCommandSetPinnedState: { PinnedStatePayload payload; if (!command->GetPayload(&payload, sizeof(payload))) return true; GetTab(payload.tab_id, tabs)->pinned = payload.pinned_state; break; } case kCommandSetExtensionAppID: { SessionID::id_type tab_id; std::string extension_app_id; if (!RestoreSetTabExtensionAppIDCommand( *command, &tab_id, &extension_app_id)) { return true; } GetTab(tab_id, tabs)->extension_app_id.swap(extension_app_id); break; } default: return true; } } WriteContentStateReadingMetrics(); return true; }
171,051
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void CloudPolicyController::StopAutoRetry() { scheduler_->CancelDelayedWork(); backend_.reset(); } Commit Message: Reset the device policy machinery upon retrying enrollment. BUG=chromium-os:18208 TEST=See bug description Review URL: http://codereview.chromium.org/7676005 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@97615 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
void CloudPolicyController::Reset() { SetState(STATE_TOKEN_UNAVAILABLE); }
170,282
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int attach_child_main(void* data) { struct attach_clone_payload* payload = (struct attach_clone_payload*)data; int ipc_socket = payload->ipc_socket; int procfd = payload->procfd; lxc_attach_options_t* options = payload->options; struct lxc_proc_context_info* init_ctx = payload->init_ctx; #if HAVE_SYS_PERSONALITY_H long new_personality; #endif int ret; int status; int expected; long flags; int fd; uid_t new_uid; gid_t new_gid; /* wait for the initial thread to signal us that it's ready * for us to start initializing */ expected = 0; status = -1; ret = lxc_read_nointr_expect(ipc_socket, &status, sizeof(status), &expected); if (ret <= 0) { ERROR("error using IPC to receive notification from initial process (0)"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } /* A description of the purpose of this functionality is * provided in the lxc-attach(1) manual page. We have to * remount here and not in the parent process, otherwise * /proc may not properly reflect the new pid namespace. */ if (!(options->namespaces & CLONE_NEWNS) && (options->attach_flags & LXC_ATTACH_REMOUNT_PROC_SYS)) { ret = lxc_attach_remount_sys_proc(); if (ret < 0) { shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } } /* now perform additional attachments*/ #if HAVE_SYS_PERSONALITY_H if (options->personality < 0) new_personality = init_ctx->personality; else new_personality = options->personality; if (options->attach_flags & LXC_ATTACH_SET_PERSONALITY) { ret = personality(new_personality); if (ret < 0) { SYSERROR("could not ensure correct architecture"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } } #endif if (options->attach_flags & LXC_ATTACH_DROP_CAPABILITIES) { ret = lxc_attach_drop_privs(init_ctx); if (ret < 0) { ERROR("could not drop privileges"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } } /* always set the environment (specify (LXC_ATTACH_KEEP_ENV, NULL, NULL) if you want this to be a no-op) */ ret = lxc_attach_set_environment(options->env_policy, options->extra_env_vars, options->extra_keep_env); if (ret < 0) { ERROR("could not set initial environment for attached process"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } /* set user / group id */ new_uid = 0; new_gid = 0; /* ignore errors, we will fall back to root in that case * (/proc was not mounted etc.) 
*/ if (options->namespaces & CLONE_NEWUSER) lxc_attach_get_init_uidgid(&new_uid, &new_gid); if (options->uid != (uid_t)-1) new_uid = options->uid; if (options->gid != (gid_t)-1) new_gid = options->gid; /* setup the control tty */ if (options->stdin_fd && isatty(options->stdin_fd)) { if (setsid() < 0) { SYSERROR("unable to setsid"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } if (ioctl(options->stdin_fd, TIOCSCTTY, (char *)NULL) < 0) { SYSERROR("unable to TIOCSTTY"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } } /* try to set the uid/gid combination */ if ((new_gid != 0 || options->namespaces & CLONE_NEWUSER)) { if (setgid(new_gid) || setgroups(0, NULL)) { SYSERROR("switching to container gid"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } } if ((new_uid != 0 || options->namespaces & CLONE_NEWUSER) && setuid(new_uid)) { SYSERROR("switching to container uid"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } /* tell initial process it may now put us into the cgroups */ status = 1; ret = lxc_write_nointr(ipc_socket, &status, sizeof(status)); if (ret != sizeof(status)) { ERROR("error using IPC to notify initial process for initialization (1)"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } /* wait for the initial thread to signal us that it has done * everything for us when it comes to cgroups etc. */ expected = 2; status = -1; ret = lxc_read_nointr_expect(ipc_socket, &status, sizeof(status), &expected); if (ret <= 0) { ERROR("error using IPC to receive final notification from initial process (2)"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } shutdown(ipc_socket, SHUT_RDWR); close(ipc_socket); if ((init_ctx->container && init_ctx->container->lxc_conf && init_ctx->container->lxc_conf->no_new_privs) || (options->attach_flags & LXC_ATTACH_NO_NEW_PRIVS)) { if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) < 0) { SYSERROR("PR_SET_NO_NEW_PRIVS could not be set. " "Process can use execve() gainable " "privileges."); rexit(-1); } INFO("PR_SET_NO_NEW_PRIVS is set. Process cannot use execve() " "gainable privileges."); } /* set new apparmor profile/selinux context */ if ((options->namespaces & CLONE_NEWNS) && (options->attach_flags & LXC_ATTACH_LSM) && init_ctx->lsm_label) { int on_exec; on_exec = options->attach_flags & LXC_ATTACH_LSM_EXEC ? 1 : 0; if (lsm_set_label_at(procfd, on_exec, init_ctx->lsm_label) < 0) { rexit(-1); } } if (init_ctx->container && init_ctx->container->lxc_conf && init_ctx->container->lxc_conf->seccomp && (lxc_seccomp_load(init_ctx->container->lxc_conf) != 0)) { ERROR("Loading seccomp policy"); rexit(-1); } lxc_proc_put_context_info(init_ctx); /* The following is done after the communication socket is * shut down. That way, all errors that might (though * unlikely) occur up until this point will have their messages * printed to the original stderr (if logging is so configured) * and not the fd the user supplied, if any. 
*/ /* fd handling for stdin, stdout and stderr; * ignore errors here, user may want to make sure * the fds are closed, for example */ if (options->stdin_fd >= 0 && options->stdin_fd != 0) dup2(options->stdin_fd, 0); if (options->stdout_fd >= 0 && options->stdout_fd != 1) dup2(options->stdout_fd, 1); if (options->stderr_fd >= 0 && options->stderr_fd != 2) dup2(options->stderr_fd, 2); /* close the old fds */ if (options->stdin_fd > 2) close(options->stdin_fd); if (options->stdout_fd > 2) close(options->stdout_fd); if (options->stderr_fd > 2) close(options->stderr_fd); /* try to remove CLOEXEC flag from stdin/stdout/stderr, * but also here, ignore errors */ for (fd = 0; fd <= 2; fd++) { flags = fcntl(fd, F_GETFL); if (flags < 0) continue; if (flags & FD_CLOEXEC) if (fcntl(fd, F_SETFL, flags & ~FD_CLOEXEC) < 0) SYSERROR("Unable to clear CLOEXEC from fd"); } /* we don't need proc anymore */ close(procfd); /* we're done, so we can now do whatever the user intended us to do */ rexit(payload->exec_function(payload->exec_payload)); } Commit Message: attach: do not send procfd to attached process So far, we opened a file descriptor refering to proc on the host inside the host namespace and handed that fd to the attached process in attach_child_main(). This was done to ensure that LSM labels were correctly setup. However, by exploiting a potential kernel bug, ptrace could be used to prevent the file descriptor from being closed which in turn could be used by an unprivileged container to gain access to the host namespace. Aside from this needing an upstream kernel fix, we should make sure that we don't pass the fd for proc itself to the attached process. However, we cannot completely prevent this, as the attached process needs to be able to change its apparmor profile by writing to /proc/self/attr/exec or /proc/self/attr/current. To minimize the attack surface, we only send the fd for /proc/self/attr/exec or /proc/self/attr/current to the attached process. To do this we introduce a little more IPC between the child and parent: * IPC mechanism: (X is receiver) * initial process intermediate attached * X <--- send pid of * attached proc, * then exit * send 0 ------------------------------------> X * [do initialization] * X <------------------------------------ send 1 * [add to cgroup, ...] * send 2 ------------------------------------> X * [set LXC_ATTACH_NO_NEW_PRIVS] * X <------------------------------------ send 3 * [open LSM label fd] * send 4 ------------------------------------> X * [set LSM label] * close socket close socket * run program The attached child tells the parent when it is ready to have its LSM labels set up. The parent then opens an approriate fd for the child PID to /proc/<pid>/attr/exec or /proc/<pid>/attr/current and sends it via SCM_RIGHTS to the child. The child can then set its LSM laben. Both sides then close the socket fds and the child execs the requested process. Signed-off-by: Christian Brauner <[email protected]> CWE ID: CWE-264
static int attach_child_main(void* data) { struct attach_clone_payload* payload = (struct attach_clone_payload*)data; int ipc_socket = payload->ipc_socket; lxc_attach_options_t* options = payload->options; struct lxc_proc_context_info* init_ctx = payload->init_ctx; #if HAVE_SYS_PERSONALITY_H long new_personality; #endif int ret; int status; int expected; long flags; int fd; int lsm_labelfd; uid_t new_uid; gid_t new_gid; /* wait for the initial thread to signal us that it's ready * for us to start initializing */ expected = 0; status = -1; ret = lxc_read_nointr_expect(ipc_socket, &status, sizeof(status), &expected); if (ret <= 0) { ERROR("Error using IPC to receive notification from initial process (0): %s.", strerror(errno)); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } /* A description of the purpose of this functionality is * provided in the lxc-attach(1) manual page. We have to * remount here and not in the parent process, otherwise * /proc may not properly reflect the new pid namespace. */ if (!(options->namespaces & CLONE_NEWNS) && (options->attach_flags & LXC_ATTACH_REMOUNT_PROC_SYS)) { ret = lxc_attach_remount_sys_proc(); if (ret < 0) { shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } } /* now perform additional attachments*/ #if HAVE_SYS_PERSONALITY_H if (options->personality < 0) new_personality = init_ctx->personality; else new_personality = options->personality; if (options->attach_flags & LXC_ATTACH_SET_PERSONALITY) { ret = personality(new_personality); if (ret < 0) { SYSERROR("could not ensure correct architecture"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } } #endif if (options->attach_flags & LXC_ATTACH_DROP_CAPABILITIES) { ret = lxc_attach_drop_privs(init_ctx); if (ret < 0) { ERROR("could not drop privileges"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } } /* always set the environment (specify (LXC_ATTACH_KEEP_ENV, NULL, NULL) if you want this to be a no-op) */ ret = lxc_attach_set_environment(options->env_policy, options->extra_env_vars, options->extra_keep_env); if (ret < 0) { ERROR("could not set initial environment for attached process"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } /* set user / group id */ new_uid = 0; new_gid = 0; /* ignore errors, we will fall back to root in that case * (/proc was not mounted etc.) 
*/ if (options->namespaces & CLONE_NEWUSER) lxc_attach_get_init_uidgid(&new_uid, &new_gid); if (options->uid != (uid_t)-1) new_uid = options->uid; if (options->gid != (gid_t)-1) new_gid = options->gid; /* setup the control tty */ if (options->stdin_fd && isatty(options->stdin_fd)) { if (setsid() < 0) { SYSERROR("unable to setsid"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } if (ioctl(options->stdin_fd, TIOCSCTTY, (char *)NULL) < 0) { SYSERROR("unable to TIOCSTTY"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } } /* try to set the uid/gid combination */ if ((new_gid != 0 || options->namespaces & CLONE_NEWUSER)) { if (setgid(new_gid) || setgroups(0, NULL)) { SYSERROR("switching to container gid"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } } if ((new_uid != 0 || options->namespaces & CLONE_NEWUSER) && setuid(new_uid)) { SYSERROR("switching to container uid"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } /* tell initial process it may now put us into the cgroups */ status = 1; ret = lxc_write_nointr(ipc_socket, &status, sizeof(status)); if (ret != sizeof(status)) { ERROR("Error using IPC to notify initial process for initialization (1): %s.", strerror(errno)); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } /* wait for the initial thread to signal us that it has done * everything for us when it comes to cgroups etc. */ expected = 2; status = -1; ret = lxc_read_nointr_expect(ipc_socket, &status, sizeof(status), &expected); if (ret <= 0) { ERROR("Error using IPC to receive message from initial process " "that it is done pre-initializing (2): %s", strerror(errno)); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } if ((init_ctx->container && init_ctx->container->lxc_conf && init_ctx->container->lxc_conf->no_new_privs) || (options->attach_flags & LXC_ATTACH_NO_NEW_PRIVS)) { if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) < 0) { SYSERROR("PR_SET_NO_NEW_PRIVS could not be set. " "Process can use execve() gainable " "privileges."); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } INFO("PR_SET_NO_NEW_PRIVS is set. Process cannot use execve() " "gainable privileges."); } /* Tell the (grand)parent to send us LSM label fd. */ status = 3; ret = lxc_write_nointr(ipc_socket, &status, sizeof(status)); if (ret <= 0) { ERROR("Error using IPC to tell parent to set up LSM labels (3): %s.", strerror(errno)); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } if ((options->namespaces & CLONE_NEWNS) && (options->attach_flags & LXC_ATTACH_LSM) && init_ctx->lsm_label) { int on_exec; /* Receive fd for LSM security module. */ ret = lxc_abstract_unix_recv_fd(ipc_socket, &lsm_labelfd, NULL, 0); if (ret <= 0) { ERROR("Error using IPC for parent to tell us LSM label fd (4): %s.", strerror(errno)); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } /* Change into our new LSM profile. */ on_exec = options->attach_flags & LXC_ATTACH_LSM_EXEC ? 1 : 0; if (lsm_set_label_at(lsm_labelfd, on_exec, init_ctx->lsm_label) < 0) { SYSERROR("Failed to set LSM label."); shutdown(ipc_socket, SHUT_RDWR); close(lsm_labelfd); rexit(-1); } close(lsm_labelfd); } if (init_ctx->container && init_ctx->container->lxc_conf && init_ctx->container->lxc_conf->seccomp && (lxc_seccomp_load(init_ctx->container->lxc_conf) != 0)) { ERROR("Loading seccomp policy"); shutdown(ipc_socket, SHUT_RDWR); rexit(-1); } shutdown(ipc_socket, SHUT_RDWR); close(ipc_socket); lxc_proc_put_context_info(init_ctx); /* The following is done after the communication socket is * shut down. 
That way, all errors that might (though * unlikely) occur up until this point will have their messages * printed to the original stderr (if logging is so configured) * and not the fd the user supplied, if any. */ /* fd handling for stdin, stdout and stderr; * ignore errors here, user may want to make sure * the fds are closed, for example */ if (options->stdin_fd >= 0 && options->stdin_fd != 0) dup2(options->stdin_fd, 0); if (options->stdout_fd >= 0 && options->stdout_fd != 1) dup2(options->stdout_fd, 1); if (options->stderr_fd >= 0 && options->stderr_fd != 2) dup2(options->stderr_fd, 2); /* close the old fds */ if (options->stdin_fd > 2) close(options->stdin_fd); if (options->stdout_fd > 2) close(options->stdout_fd); if (options->stderr_fd > 2) close(options->stderr_fd); /* try to remove CLOEXEC flag from stdin/stdout/stderr, * but also here, ignore errors */ for (fd = 0; fd <= 2; fd++) { flags = fcntl(fd, F_GETFL); if (flags < 0) continue; if (flags & FD_CLOEXEC) if (fcntl(fd, F_SETFL, flags & ~FD_CLOEXEC) < 0) SYSERROR("Unable to clear CLOEXEC from fd"); } /* we're done, so we can now do whatever the user intended us to do */ rexit(payload->exec_function(payload->exec_payload)); }
168,770
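The safer design described in the lxc commit above hinges on passing one specific, already-opened fd (for /proc/<pid>/attr/exec or /proc/<pid>/attr/current) from the parent to the attached child over a unix socket, rather than sharing a dirfd for all of /proc. Below is a minimal sketch of that SCM_RIGHTS transfer; the helper name and minimal error handling are illustrative, not lxc's lxc_abstract_unix_* implementation.

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Sketch of passing exactly one already-opened fd over a unix socket with
     * SCM_RIGHTS, as the commit's parent does for the /proc/<pid>/attr/* file. */
    static int send_one_fd(int sock, int fd)
    {
        char byte = 0;
        struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
        union { struct cmsghdr hdr; char buf[CMSG_SPACE(sizeof(int))]; } u;
        struct msghdr msg;
        struct cmsghdr *cmsg;

        memset(&u, 0, sizeof(u));
        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = u.buf;
        msg.msg_controllen = sizeof(u.buf);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

        return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
    }

The receiving side mirrors this with recvmsg() and reads the fd out of the first SCM_RIGHTS control message; because only the attr file is transferred, keeping the fd alive via ptrace no longer yields a handle into the host's /proc, which is the attack surface the commit message describes.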
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void DevToolsSession::AddHandler( std::unique_ptr<protocol::DevToolsDomainHandler> handler) { handler->Wire(dispatcher_.get()); handler->SetRenderer(process_, host_); handlers_[handler->name()] = std::move(handler); } Commit Message: DevTools: speculative fix for crash in NetworkHandler::Disable This keeps BrowserContext* and StoragePartition* instead of RenderProcessHost* in an attemp to resolve UAF of RenderProcessHost upon closure of DevTools front-end. Bug: 801117, 783067, 780694 Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b Reviewed-on: https://chromium-review.googlesource.com/876657 Commit-Queue: Andrey Kosyakov <[email protected]> Reviewed-by: Dmitry Gozman <[email protected]> Cr-Commit-Position: refs/heads/master@{#531157} CWE ID: CWE-20
void DevToolsSession::AddHandler( std::unique_ptr<protocol::DevToolsDomainHandler> handler) { handler->Wire(dispatcher_.get()); handler->SetRenderer(process_host_id_, host_); handlers_[handler->name()] = std::move(handler); }
172,740
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int cmd_mount(void *data, const char *_input) { ut64 off = 0; char *input, *oinput, *ptr, *ptr2; RList *list; RListIter *iter; RFSFile *file; RFSRoot *root; RFSPlugin *plug; RFSPartition *part; RCore *core = (RCore *)data; if (!strncmp ("kdir", _input, 4)) { return cmd_mkdir (data, _input); } if (!strncmp ("v", _input, 1)) { return cmd_mv (data, _input); } input = oinput = strdup (_input); switch (*input) { case ' ': input++; if (input[0]==' ') input++; ptr = strchr (input, ' '); if (ptr) { *ptr = 0; ptr++; ptr2 = strchr (ptr, ' '); if (ptr2) { *ptr2 = 0; off = r_num_math (core->num, ptr2+1); } if (!r_fs_mount (core->fs, ptr, input, off)) eprintf ("Cannot mount %s\n", input); } else { if (!(ptr = r_fs_name (core->fs, core->offset))) eprintf ("Unknown filesystem type\n"); else if (!r_fs_mount (core->fs, ptr, input, core->offset)) eprintf ("Cannot mount %s\n", input); free (ptr); } break; case '-': r_fs_umount (core->fs, input+1); break; case '*': eprintf ("List commands in radare format\n"); r_list_foreach (core->fs->roots, iter, root) { r_cons_printf ("m %s %s 0x%"PFMT64x"\n", root-> path, root->p->name, root->delta); } break; case '\0': r_list_foreach (core->fs->roots, iter, root) { r_cons_printf ("%s\t0x%"PFMT64x"\t%s\n", root->p->name, root->delta, root->path); } break; case 'l': // list of plugins r_list_foreach (core->fs->plugins, iter, plug) { r_cons_printf ("%10s %s\n", plug->name, plug->desc); } break; case 'd': input++; if (input[0]==' ') input++; list = r_fs_dir (core->fs, input); if (list) { r_list_foreach (list, iter, file) { r_cons_printf ("%c %s\n", file->type, file->name); } r_list_free (list); } else eprintf ("Cannot open '%s' directory\n", input); break; case 'p': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) { *ptr = 0; off = r_num_math (core->num, ptr+1); } list = r_fs_partitions (core->fs, input, off); if (list) { r_list_foreach (list, iter, part) { r_cons_printf ("%d %02x 0x%010"PFMT64x" 0x%010"PFMT64x"\n", part->number, part->type, part->start, part->start+part->length); } r_list_free (list); } else eprintf ("Cannot read partition\n"); break; case 'o': input++; if (input[0]==' ') input++; file = r_fs_open (core->fs, input); if (file) { r_fs_read (core->fs, file, 0, file->size); r_cons_printf ("f file %d 0x%08"PFMT64x"\n", file->size, file->off); r_fs_close (core->fs, file); } else eprintf ("Cannot open file\n"); break; case 'g': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) *ptr++ = 0; else ptr = "./"; file = r_fs_open (core->fs, input); if (file) { r_fs_read (core->fs, file, 0, file->size); write (1, file->data, file->size); r_fs_close (core->fs, file); write (1, "\n", 1); } else if (!r_fs_dir_dump (core->fs, input, ptr)) eprintf ("Cannot open file\n"); break; case 'f': input++; switch (*input) { case '?': r_cons_printf ( "Usage: mf[no] [...]\n" " mfn /foo *.c ; search files by name in /foo path\n" " mfo /foo 0x5e91 ; search files by offset in /foo path\n" ); break; case 'n': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) { *ptr++ = 0; list = r_fs_find_name (core->fs, input, ptr); r_list_foreach (list, iter, ptr) { r_str_chop_path (ptr); printf ("%s\n", ptr); } } else eprintf ("Unknown store path\n"); break; case 'o': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) { *ptr++ = 0; ut64 off = r_num_math (core->num, ptr); list = r_fs_find_off (core->fs, input, off); r_list_foreach (list, iter, ptr) { r_str_chop_path (ptr); printf ("%s\n", ptr); } } else 
eprintf ("Unknown store path\n"); break; } break; case 's': if (core->http_up) { free (oinput); return false; } input++; if (input[0]==' ') input++; r_fs_prompt (core->fs, input); break; case 'y': eprintf ("TODO\n"); break; case '?': { const char* help_msg[] = { "Usage:", "m[-?*dgy] [...] ", "Mountpoints management", "m", "", "List all mountpoints in human readable format", "m*", "", "Same as above, but in r2 commands", "ml", "", "List filesystem plugins", "m", " /mnt", "Mount fs at /mnt with autodetect fs and current offset", "m", " /mnt ext2 0", "Mount ext2 fs at /mnt with delta 0 on IO", "m-/", "", "Umount given path (/)", "my", "", "Yank contents of file into clipboard", "mo", " /foo", "Get offset and size of given file", "mg", " /foo", "Get contents of file/dir dumped to disk (XXX?)", "mf", "[?] [o|n]", "Search files for given filename or for offset", "md", " /", "List directory contents for path", "mp", "", "List all supported partition types", "mp", " msdos 0", "Show partitions in msdos format at offset 0", "ms", " /mnt", "Open filesystem prompt at /mnt", NULL}; r_core_cmd_help (core, help_msg); } break; } free (oinput); return 0; } Commit Message: Fix #7723 - crash in ext2 GRUB code because of variable size array in stack CWE ID: CWE-119
static int cmd_mount(void *data, const char *_input) { ut64 off = 0; char *input, *oinput, *ptr, *ptr2; RList *list; RListIter *iter; RFSFile *file; RFSRoot *root; RFSPlugin *plug; RFSPartition *part; RCore *core = (RCore *)data; if (!strncmp ("kdir", _input, 4)) { return cmd_mkdir (data, _input); } if (!strncmp ("v", _input, 1)) { return cmd_mv (data, _input); } input = oinput = strdup (_input); switch (*input) { case ' ': input++; if (input[0]==' ') { input++; } ptr = strchr (input, ' '); if (ptr) { *ptr = 0; ptr++; ptr2 = strchr (ptr, ' '); if (ptr2) { *ptr2 = 0; off = r_num_math (core->num, ptr2+1); } if (!r_fs_mount (core->fs, ptr, input, off)) { eprintf ("Cannot mount %s\n", input); } } else { if (!(ptr = r_fs_name (core->fs, core->offset))) { eprintf ("Unknown filesystem type\n"); } else if (!r_fs_mount (core->fs, ptr, input, core->offset)) { eprintf ("Cannot mount %s\n", input); } free (ptr); } break; case '-': r_fs_umount (core->fs, input+1); break; case '*': eprintf ("List commands in radare format\n"); r_list_foreach (core->fs->roots, iter, root) { r_cons_printf ("m %s %s 0x%"PFMT64x"\n", root-> path, root->p->name, root->delta); } break; case '\0': r_list_foreach (core->fs->roots, iter, root) { r_cons_printf ("%s\t0x%"PFMT64x"\t%s\n", root->p->name, root->delta, root->path); } break; case 'l': // list of plugins r_list_foreach (core->fs->plugins, iter, plug) { r_cons_printf ("%10s %s\n", plug->name, plug->desc); } break; case 'd': input++; if (input[0]==' ') input++; list = r_fs_dir (core->fs, input); if (list) { r_list_foreach (list, iter, file) { r_cons_printf ("%c %s\n", file->type, file->name); } r_list_free (list); } else eprintf ("Cannot open '%s' directory\n", input); break; case 'p': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) { *ptr = 0; off = r_num_math (core->num, ptr+1); } list = r_fs_partitions (core->fs, input, off); if (list) { r_list_foreach (list, iter, part) { r_cons_printf ("%d %02x 0x%010"PFMT64x" 0x%010"PFMT64x"\n", part->number, part->type, part->start, part->start+part->length); } r_list_free (list); } else eprintf ("Cannot read partition\n"); break; case 'o': input++; if (input[0]==' ') input++; file = r_fs_open (core->fs, input); if (file) { r_fs_read (core->fs, file, 0, file->size); r_cons_printf ("f file %d 0x%08"PFMT64x"\n", file->size, file->off); r_fs_close (core->fs, file); } else eprintf ("Cannot open file\n"); break; case 'g': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) *ptr++ = 0; else ptr = "./"; file = r_fs_open (core->fs, input); if (file) { r_fs_read (core->fs, file, 0, file->size); write (1, file->data, file->size); r_fs_close (core->fs, file); write (1, "\n", 1); } else if (!r_fs_dir_dump (core->fs, input, ptr)) eprintf ("Cannot open file\n"); break; case 'f': input++; switch (*input) { case '?': r_cons_printf ( "Usage: mf[no] [...]\n" " mfn /foo *.c ; search files by name in /foo path\n" " mfo /foo 0x5e91 ; search files by offset in /foo path\n" ); break; case 'n': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) { *ptr++ = 0; list = r_fs_find_name (core->fs, input, ptr); r_list_foreach (list, iter, ptr) { r_str_chop_path (ptr); printf ("%s\n", ptr); } } else eprintf ("Unknown store path\n"); break; case 'o': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) { *ptr++ = 0; ut64 off = r_num_math (core->num, ptr); list = r_fs_find_off (core->fs, input, off); r_list_foreach (list, iter, ptr) { r_str_chop_path (ptr); printf ("%s\n", ptr); 
} } else eprintf ("Unknown store path\n"); break; } break; case 's': if (core->http_up) { free (oinput); return false; } input++; if (input[0]==' ') input++; r_fs_prompt (core->fs, input); break; case 'y': eprintf ("TODO\n"); break; case '?': { const char* help_msg[] = { "Usage:", "m[-?*dgy] [...] ", "Mountpoints management", "m", "", "List all mountpoints in human readable format", "m*", "", "Same as above, but in r2 commands", "ml", "", "List filesystem plugins", "m", " /mnt", "Mount fs at /mnt with autodetect fs and current offset", "m", " /mnt ext2 0", "Mount ext2 fs at /mnt with delta 0 on IO", "m-/", "", "Umount given path (/)", "my", "", "Yank contents of file into clipboard", "mo", " /foo", "Get offset and size of given file", "mg", " /foo", "Get contents of file/dir dumped to disk (XXX?)", "mf", "[?] [o|n]", "Search files for given filename or for offset", "md", " /", "List directory contents for path", "mp", "", "List all supported partition types", "mp", " msdos 0", "Show partitions in msdos format at offset 0", "ms", " /mnt", "Open filesystem prompt at /mnt", NULL}; r_core_cmd_help (core, help_msg); } break; } free (oinput); return 0; }
168,086
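The radare2 commit title above attributes the crash to a variable-size array on the stack in the ext2/GRUB code; the cmd_mount hunk shown is largely incidental brace cleanup. The following is a generic, hypothetical illustration of the kind of change such a fix usually makes (moving an attacker-sized buffer from a stack VLA to a checked heap allocation), not the actual radare2 code.

    #include <stdlib.h>
    #include <string.h>

    /* Generic illustration only: a length read from the filesystem image
     * sizes an on-stack VLA, which a crafted image can use to overflow the
     * stack; moving the buffer to the heap behind an explicit bound removes
     * that. */
    static char *read_entry_name(const unsigned char *src, size_t len)
    {
        char *name;

        if (len == 0 || len > 255)      /* reject implausible on-disk lengths */
            return NULL;
        name = malloc(len + 1);         /* was roughly: char name[len]; */
        if (!name)
            return NULL;
        memcpy(name, src, len);
        name[len] = '\0';
        return name;                    /* caller frees */
    }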
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: vips_tracked_malloc( size_t size ) { void *buf; vips_tracked_init(); /* Need an extra sizeof(size_t) bytes to track * size of this block. Ask for an extra 16 to make sure we don't break * alignment rules. */ size += 16; if( !(buf = g_try_malloc( size )) ) { #ifdef DEBUG g_assert_not_reached(); #endif /*DEBUG*/ vips_error( "vips_tracked", _( "out of memory --- size == %dMB" ), (int) (size / (1024.0 * 1024.0)) ); g_warning( _( "out of memory --- size == %dMB" ), (int) (size / (1024.0 * 1024.0)) ); return( NULL ); } g_mutex_lock( vips_tracked_mutex ); *((size_t *)buf) = size; buf = (void *) ((char *)buf + 16); vips_tracked_mem += size; if( vips_tracked_mem > vips_tracked_mem_highwater ) vips_tracked_mem_highwater = vips_tracked_mem; vips_tracked_allocs += 1; #ifdef DEBUG_VERBOSE printf( "vips_tracked_malloc: %p, %zd bytes\n", buf, size ); #endif /*DEBUG_VERBOSE*/ g_mutex_unlock( vips_tracked_mutex ); VIPS_GATE_MALLOC( size ); return( buf ); } Commit Message: zero memory on malloc to prevent write of uninit memory under some error conditions thanks Balint CWE ID: CWE-200
vips_tracked_malloc( size_t size ) { void *buf; vips_tracked_init(); /* Need an extra sizeof(size_t) bytes to track * size of this block. Ask for an extra 16 to make sure we don't break * alignment rules. */ size += 16; if( !(buf = g_try_malloc0( size )) ) { #ifdef DEBUG g_assert_not_reached(); #endif /*DEBUG*/ vips_error( "vips_tracked", _( "out of memory --- size == %dMB" ), (int) (size / (1024.0 * 1024.0)) ); g_warning( _( "out of memory --- size == %dMB" ), (int) (size / (1024.0 * 1024.0)) ); return( NULL ); } g_mutex_lock( vips_tracked_mutex ); *((size_t *)buf) = size; buf = (void *) ((char *)buf + 16); vips_tracked_mem += size; if( vips_tracked_mem > vips_tracked_mem_highwater ) vips_tracked_mem_highwater = vips_tracked_mem; vips_tracked_allocs += 1; #ifdef DEBUG_VERBOSE printf( "vips_tracked_malloc: %p, %zd bytes\n", buf, size ); #endif /*DEBUG_VERBOSE*/ g_mutex_unlock( vips_tracked_mutex ); VIPS_GATE_MALLOC( size ); return( buf ); }
169,740
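The libvips fix above is a tiny textual change with a real effect: g_try_malloc0() hands back zeroed memory, so the tracked buffer cannot leak stale heap contents if an error path later exposes it before it is fully written. A plain-C analogue of the same idea, with illustrative names:

    #include <stdlib.h>
    #include <string.h>

    /* Plain-C analogue of swapping g_try_malloc() for g_try_malloc0(): the
     * tracking allocator zeroes the whole block up front, so a buffer exposed
     * on a later error path never contains stale heap data. Names are
     * illustrative, not libvips'. */
    static void *tracked_alloc(size_t size)
    {
        unsigned char *buf = calloc(1, size + 16);  /* zeroed, 16-byte header */

        if (!buf)
            return NULL;
        memcpy(buf, &size, sizeof(size));           /* track the block size */
        return buf + 16;                            /* caller gets zeroed payload */
    }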
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static Image *ReadWBMPImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; int byte; MagickBooleanType status; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; ssize_t y; unsigned char bit; unsigned short header; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } if (ReadBlob(image,2,(unsigned char *) &header) == 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (header != 0) ThrowReaderException(CoderError,"OnlyLevelZerofilesSupported"); /* Initialize image structure. */ if (WBMPReadInteger(image,&image->columns) == MagickFalse) ThrowReaderException(CorruptImageError,"CorruptWBMPimage"); if (WBMPReadInteger(image,&image->rows) == MagickFalse) ThrowReaderException(CorruptImageError,"CorruptWBMPimage"); if ((image->columns == 0) || (image->rows == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (DiscardBlobBytes(image,image->offset) == MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); if (AcquireImageColormap(image,2) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Convert bi-level image to pixel packets. */ for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); bit=0; byte=0; for (x=0; x < (ssize_t) image->columns; x++) { if (bit == 0) { byte=ReadBlobByte(image); if (byte == EOF) ThrowReaderException(CorruptImageError,"CorruptImage"); } SetPixelIndex(indexes+x,(byte & (0x01 << (7-bit))) ? 1 : 0); bit++; if (bit == 8) bit=0; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } (void) SyncImage(image); if (EOFBlob(image) != MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); return(GetFirstImageInList(image)); } Commit Message: CWE ID: CWE-119
static Image *ReadWBMPImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; int byte; MagickBooleanType status; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; ssize_t y; unsigned char bit; unsigned short header; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } if (ReadBlob(image,2,(unsigned char *) &header) == 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (header != 0) ThrowReaderException(CoderError,"OnlyLevelZerofilesSupported"); /* Initialize image structure. */ if (WBMPReadInteger(image,&image->columns) == MagickFalse) ThrowReaderException(CorruptImageError,"CorruptWBMPimage"); if (WBMPReadInteger(image,&image->rows) == MagickFalse) ThrowReaderException(CorruptImageError,"CorruptWBMPimage"); if ((image->columns == 0) || (image->rows == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (DiscardBlobBytes(image,image->offset) == MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); if (AcquireImageColormap(image,2) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* Convert bi-level image to pixel packets. */ for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); bit=0; byte=0; for (x=0; x < (ssize_t) image->columns; x++) { if (bit == 0) { byte=ReadBlobByte(image); if (byte == EOF) ThrowReaderException(CorruptImageError,"CorruptImage"); } SetPixelIndex(indexes+x,(byte & (0x01 << (7-bit))) ? 1 : 0); bit++; if (bit == 8) bit=0; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } (void) SyncImage(image); if (EOFBlob(image) != MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); return(GetFirstImageInList(image)); }
168,619
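The ImageMagick fix above calls SetImageExtent() and checks its result before the row-decoding loop, so the pixel cache is committed to the declared columns/rows before any pixel is written. A generic C reduction of that ordering, with hypothetical struct and function names:

    #include <stdint.h>
    #include <stdlib.h>

    /* Generic reduction of the ordering the fix enforces: validate and commit
     * the declared geometry (and its allocation) before any row is decoded.
     * Names are illustrative, not ImageMagick's. */
    struct image_buf {
        size_t columns, rows;
        unsigned char *pixels;          /* 1 byte per pixel in this sketch */
    };

    static int set_extent(struct image_buf *im, size_t columns, size_t rows)
    {
        if (columns == 0 || rows == 0 || columns > SIZE_MAX / rows)
            return -1;                  /* reject absent or overflowing geometry */
        im->pixels = calloc(columns, rows);
        if (!im->pixels)
            return -1;
        im->columns = columns;
        im->rows = rows;
        return 0;                       /* only now may the row loop run */
    }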
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int ParseCaffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { uint32_t chan_chunk = 0, channel_layout = 0, bcount; unsigned char *channel_identities = NULL; unsigned char *channel_reorder = NULL; int64_t total_samples = 0, infilesize; CAFFileHeader caf_file_header; CAFChunkHeader caf_chunk_header; CAFAudioFormat caf_audio_format; int i; infilesize = DoGetFileSize (infile); memcpy (&caf_file_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &caf_file_header) + 4, sizeof (CAFFileHeader) - 4, &bcount) || bcount != sizeof (CAFFileHeader) - 4)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_file_header, sizeof (CAFFileHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_file_header, CAFFileHeaderFormat); if (caf_file_header.mFileVersion != 1) { error_line ("%s: can't handle version %d .CAF files!", infilename, caf_file_header.mFileVersion); return WAVPACK_SOFT_ERROR; } while (1) { if (!DoReadFile (infile, &caf_chunk_header, sizeof (CAFChunkHeader), &bcount) || bcount != sizeof (CAFChunkHeader)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_chunk_header, sizeof (CAFChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_chunk_header, CAFChunkHeaderFormat); if (!strncmp (caf_chunk_header.mChunkType, "desc", 4)) { int supported = TRUE; if (caf_chunk_header.mChunkSize != sizeof (CAFAudioFormat) || !DoReadFile (infile, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize, &bcount) || bcount != caf_chunk_header.mChunkSize) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_audio_format, CAFAudioFormatFormat); if (debug_logging_mode) { char formatstr [5]; memcpy (formatstr, caf_audio_format.mFormatID, 4); formatstr [4] = 0; error_line ("format = %s, flags = %x, sampling rate = %g", formatstr, caf_audio_format.mFormatFlags, caf_audio_format.mSampleRate); error_line ("packet = %d bytes and %d frames", caf_audio_format.mBytesPerPacket, caf_audio_format.mFramesPerPacket); error_line ("channels per frame = %d, bits per channel = %d", caf_audio_format.mChannelsPerFrame, caf_audio_format.mBitsPerChannel); } if (strncmp (caf_audio_format.mFormatID, "lpcm", 4) || (caf_audio_format.mFormatFlags & ~3)) supported = FALSE; else if (caf_audio_format.mSampleRate < 1.0 || caf_audio_format.mSampleRate > 16777215.0 || caf_audio_format.mSampleRate != floor (caf_audio_format.mSampleRate)) supported = FALSE; else if (!caf_audio_format.mChannelsPerFrame || caf_audio_format.mChannelsPerFrame > 256) supported = FALSE; else if (caf_audio_format.mBitsPerChannel < 1 || caf_audio_format.mBitsPerChannel > 32 || ((caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) && caf_audio_format.mBitsPerChannel != 32)) supported = FALSE; else if (caf_audio_format.mFramesPerPacket != 1 || caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame < (caf_audio_format.mBitsPerChannel + 7) / 8 || 
caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame > 4 || caf_audio_format.mBytesPerPacket % caf_audio_format.mChannelsPerFrame) supported = FALSE; if (!supported) { error_line ("%s is an unsupported .CAF format!", infilename); return WAVPACK_SOFT_ERROR; } config->bytes_per_sample = caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame; config->float_norm_exp = (caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) ? 127 : 0; config->bits_per_sample = caf_audio_format.mBitsPerChannel; config->num_channels = caf_audio_format.mChannelsPerFrame; config->sample_rate = (int) caf_audio_format.mSampleRate; if (!(caf_audio_format.mFormatFlags & CAF_FORMAT_LITTLE_ENDIAN) && config->bytes_per_sample > 1) config->qmode |= QMODE_BIG_ENDIAN; if (config->bytes_per_sample == 1) config->qmode |= QMODE_SIGNED_BYTES; if (debug_logging_mode) { if (config->float_norm_exp == 127) error_line ("data format: 32-bit %s-endian floating point", (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little"); else error_line ("data format: %d-bit %s-endian integers stored in %d byte(s)", config->bits_per_sample, (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little", config->bytes_per_sample); } } else if (!strncmp (caf_chunk_header.mChunkType, "chan", 4)) { CAFChannelLayout *caf_channel_layout = malloc ((size_t) caf_chunk_header.mChunkSize); if (caf_chunk_header.mChunkSize < sizeof (CAFChannelLayout) || !DoReadFile (infile, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize, &bcount) || bcount != caf_chunk_header.mChunkSize) { error_line ("%s is not a valid .CAF file!", infilename); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (caf_channel_layout, CAFChannelLayoutFormat); chan_chunk = 1; if (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED)) { error_line ("this CAF file already has channel order information!"); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } switch (caf_channel_layout->mChannelLayoutTag) { case kCAFChannelLayoutTag_UseChannelDescriptions: { CAFChannelDescription *descriptions = (CAFChannelDescription *) (caf_channel_layout + 1); int num_descriptions = caf_channel_layout->mNumberChannelDescriptions; int label, cindex = 0, idents = 0; if (caf_chunk_header.mChunkSize != sizeof (CAFChannelLayout) + sizeof (CAFChannelDescription) * num_descriptions || num_descriptions != config->num_channels) { error_line ("channel descriptions in 'chan' chunk are the wrong size!"); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } if (num_descriptions >= 256) { error_line ("%d channel descriptions is more than we can handle...ignoring!"); break; } channel_reorder = malloc (num_descriptions); memset (channel_reorder, -1, num_descriptions); channel_identities = malloc (num_descriptions+1); for (i = 0; i < num_descriptions; ++i) { WavpackBigEndianToNative (descriptions + i, CAFChannelDescriptionFormat); if (debug_logging_mode) error_line ("chan %d --> %d", i + 1, descriptions [i].mChannelLabel); } for (label = 1; label <= 18; ++label) for (i = 0; i < num_descriptions; ++i) if (descriptions [i].mChannelLabel == label) { config->channel_mask |= 1 << (label - 1); channel_reorder [i] = cindex++; break; } for (i = 0; i < num_descriptions; ++i) if (channel_reorder [i] == (unsigned char) -1) { 
uint32_t clabel = descriptions [i].mChannelLabel; if (clabel == 0 || clabel == 0xffffffff || clabel == 100) channel_identities [idents++] = 0xff; else if ((clabel >= 33 && clabel <= 44) || (clabel >= 200 && clabel <= 207) || (clabel >= 301 && clabel <= 305)) channel_identities [idents++] = clabel >= 301 ? clabel - 80 : clabel; else { error_line ("warning: unknown channel descriptions label: %d", clabel); channel_identities [idents++] = 0xff; } channel_reorder [i] = cindex++; } for (i = 0; i < num_descriptions; ++i) if (channel_reorder [i] != i) break; if (i == num_descriptions) { free (channel_reorder); // no reordering required, so don't channel_reorder = NULL; } else { config->qmode |= QMODE_REORDERED_CHANS; // reordering required, put channel count into layout channel_layout = num_descriptions; } if (!idents) { // if no non-MS channels, free the identities string free (channel_identities); channel_identities = NULL; } else channel_identities [idents] = 0; // otherwise NULL terminate it if (debug_logging_mode) { error_line ("layout_tag = 0x%08x, so generated bitmap of 0x%08x from %d descriptions, %d non-MS", caf_channel_layout->mChannelLayoutTag, config->channel_mask, caf_channel_layout->mNumberChannelDescriptions, idents); if (channel_reorder && num_descriptions <= 8) { char reorder_string [] = "12345678"; for (i = 0; i < num_descriptions; ++i) reorder_string [i] = channel_reorder [i] + '1'; reorder_string [i] = 0; error_line ("reordering string = \"%s\"\n", reorder_string); } } } break; case kCAFChannelLayoutTag_UseChannelBitmap: config->channel_mask = caf_channel_layout->mChannelBitmap; if (debug_logging_mode) error_line ("layout_tag = 0x%08x, so using supplied bitmap of 0x%08x", caf_channel_layout->mChannelLayoutTag, caf_channel_layout->mChannelBitmap); break; default: for (i = 0; i < NUM_LAYOUTS; ++i) if (caf_channel_layout->mChannelLayoutTag == layouts [i].mChannelLayoutTag) { config->channel_mask = layouts [i].mChannelBitmap; channel_layout = layouts [i].mChannelLayoutTag; if (layouts [i].mChannelReorder) { channel_reorder = (unsigned char *) strdup (layouts [i].mChannelReorder); config->qmode |= QMODE_REORDERED_CHANS; } if (layouts [i].mChannelIdentities) channel_identities = (unsigned char *) strdup (layouts [i].mChannelIdentities); if (debug_logging_mode) error_line ("layout_tag 0x%08x found in table, bitmap = 0x%08x, reorder = %s, identities = %s", channel_layout, config->channel_mask, channel_reorder ? "yes" : "no", channel_identities ? 
"yes" : "no"); break; } if (i == NUM_LAYOUTS && debug_logging_mode) error_line ("layout_tag 0x%08x not found in table...all channels unassigned", caf_channel_layout->mChannelLayoutTag); break; } free (caf_channel_layout); } else if (!strncmp (caf_chunk_header.mChunkType, "data", 4)) { // on the data chunk, get size and exit loop uint32_t mEditCount; if (!DoReadFile (infile, &mEditCount, sizeof (mEditCount), &bcount) || bcount != sizeof (mEditCount)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &mEditCount, sizeof (mEditCount))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } if ((config->qmode & QMODE_IGNORE_LENGTH) || caf_chunk_header.mChunkSize == -1) { config->qmode |= QMODE_IGNORE_LENGTH; if (infilesize && DoGetFilePosition (infile) != -1) total_samples = (infilesize - DoGetFilePosition (infile)) / caf_audio_format.mBytesPerPacket; else total_samples = -1; } else { if (infilesize && infilesize - caf_chunk_header.mChunkSize > 16777216) { error_line (".CAF file %s has over 16 MB of extra CAFF data, probably is corrupt!", infilename); return WAVPACK_SOFT_ERROR; } if ((caf_chunk_header.mChunkSize - 4) % caf_audio_format.mBytesPerPacket) { error_line (".CAF file %s has an invalid data chunk size, probably is corrupt!", infilename); return WAVPACK_SOFT_ERROR; } total_samples = (caf_chunk_header.mChunkSize - 4) / caf_audio_format.mBytesPerPacket; if (!total_samples) { error_line ("this .CAF file has no audio samples, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (total_samples > MAX_WAVPACK_SAMPLES) { error_line ("%s has too many samples for WavPack!", infilename); return WAVPACK_SOFT_ERROR; } } break; } else { // just copy unknown chunks to output file int bytes_to_copy = (uint32_t) caf_chunk_header.mChunkSize; char *buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", caf_chunk_header.mChunkType [0], caf_chunk_header.mChunkType [1], caf_chunk_header.mChunkType [2], caf_chunk_header.mChunkType [3], caf_chunk_header.mChunkSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (!chan_chunk && !config->channel_mask && config->num_channels <= 2 && !(config->qmode & QMODE_CHANS_UNASSIGNED)) config->channel_mask = 0x5 - config->num_channels; if (!WavpackSetConfiguration64 (wpc, config, total_samples, channel_identities)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } if (channel_identities) free (channel_identities); if (channel_layout || channel_reorder) { if (!WavpackSetChannelLayout (wpc, channel_layout, channel_reorder)) { error_line ("problem with setting channel layout (should not happen)"); return WAVPACK_SOFT_ERROR; } if (channel_reorder) free (channel_reorder); } return WAVPACK_NO_ERROR; } Commit Message: issue #28, fix buffer overflows and bad allocs on corrupt CAF files CWE ID: CWE-125
int ParseCaffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config) { uint32_t chan_chunk = 0, channel_layout = 0, bcount; unsigned char *channel_identities = NULL; unsigned char *channel_reorder = NULL; int64_t total_samples = 0, infilesize; CAFFileHeader caf_file_header; CAFChunkHeader caf_chunk_header; CAFAudioFormat caf_audio_format; int i; infilesize = DoGetFileSize (infile); memcpy (&caf_file_header, fourcc, 4); if ((!DoReadFile (infile, ((char *) &caf_file_header) + 4, sizeof (CAFFileHeader) - 4, &bcount) || bcount != sizeof (CAFFileHeader) - 4)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_file_header, sizeof (CAFFileHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_file_header, CAFFileHeaderFormat); if (caf_file_header.mFileVersion != 1) { error_line ("%s: can't handle version %d .CAF files!", infilename, caf_file_header.mFileVersion); return WAVPACK_SOFT_ERROR; } while (1) { if (!DoReadFile (infile, &caf_chunk_header, sizeof (CAFChunkHeader), &bcount) || bcount != sizeof (CAFChunkHeader)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_chunk_header, sizeof (CAFChunkHeader))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_chunk_header, CAFChunkHeaderFormat); if (!strncmp (caf_chunk_header.mChunkType, "desc", 4)) { int supported = TRUE; if (caf_chunk_header.mChunkSize != sizeof (CAFAudioFormat) || !DoReadFile (infile, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize, &bcount) || bcount != caf_chunk_header.mChunkSize) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &caf_audio_format, (uint32_t) caf_chunk_header.mChunkSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (&caf_audio_format, CAFAudioFormatFormat); if (debug_logging_mode) { char formatstr [5]; memcpy (formatstr, caf_audio_format.mFormatID, 4); formatstr [4] = 0; error_line ("format = %s, flags = %x, sampling rate = %g", formatstr, caf_audio_format.mFormatFlags, caf_audio_format.mSampleRate); error_line ("packet = %d bytes and %d frames", caf_audio_format.mBytesPerPacket, caf_audio_format.mFramesPerPacket); error_line ("channels per frame = %d, bits per channel = %d", caf_audio_format.mChannelsPerFrame, caf_audio_format.mBitsPerChannel); } if (strncmp (caf_audio_format.mFormatID, "lpcm", 4) || (caf_audio_format.mFormatFlags & ~3)) supported = FALSE; else if (caf_audio_format.mSampleRate < 1.0 || caf_audio_format.mSampleRate > 16777215.0 || caf_audio_format.mSampleRate != floor (caf_audio_format.mSampleRate)) supported = FALSE; else if (!caf_audio_format.mChannelsPerFrame || caf_audio_format.mChannelsPerFrame > 256) supported = FALSE; else if (caf_audio_format.mBitsPerChannel < 1 || caf_audio_format.mBitsPerChannel > 32 || ((caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) && caf_audio_format.mBitsPerChannel != 32)) supported = FALSE; else if (caf_audio_format.mFramesPerPacket != 1 || caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame < (caf_audio_format.mBitsPerChannel + 7) / 8 || 
caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame > 4 || caf_audio_format.mBytesPerPacket % caf_audio_format.mChannelsPerFrame) supported = FALSE; if (!supported) { error_line ("%s is an unsupported .CAF format!", infilename); return WAVPACK_SOFT_ERROR; } config->bytes_per_sample = caf_audio_format.mBytesPerPacket / caf_audio_format.mChannelsPerFrame; config->float_norm_exp = (caf_audio_format.mFormatFlags & CAF_FORMAT_FLOAT) ? 127 : 0; config->bits_per_sample = caf_audio_format.mBitsPerChannel; config->num_channels = caf_audio_format.mChannelsPerFrame; config->sample_rate = (int) caf_audio_format.mSampleRate; if (!(caf_audio_format.mFormatFlags & CAF_FORMAT_LITTLE_ENDIAN) && config->bytes_per_sample > 1) config->qmode |= QMODE_BIG_ENDIAN; if (config->bytes_per_sample == 1) config->qmode |= QMODE_SIGNED_BYTES; if (debug_logging_mode) { if (config->float_norm_exp == 127) error_line ("data format: 32-bit %s-endian floating point", (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little"); else error_line ("data format: %d-bit %s-endian integers stored in %d byte(s)", config->bits_per_sample, (config->qmode & QMODE_BIG_ENDIAN) ? "big" : "little", config->bytes_per_sample); } } else if (!strncmp (caf_chunk_header.mChunkType, "chan", 4)) { CAFChannelLayout *caf_channel_layout; if (caf_chunk_header.mChunkSize < sizeof (CAFChannelLayout) || caf_chunk_header.mChunkSize > 1024) { error_line ("this .CAF file has an invalid 'chan' chunk!"); return WAVPACK_SOFT_ERROR; } if (debug_logging_mode) error_line ("'chan' chunk is %d bytes", (int) caf_chunk_header.mChunkSize); caf_channel_layout = malloc ((size_t) caf_chunk_header.mChunkSize); if (!DoReadFile (infile, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize, &bcount) || bcount != caf_chunk_header.mChunkSize) { error_line ("%s is not a valid .CAF file!", infilename); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, caf_channel_layout, (uint32_t) caf_chunk_header.mChunkSize)) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } WavpackBigEndianToNative (caf_channel_layout, CAFChannelLayoutFormat); chan_chunk = 1; if (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED)) { error_line ("this CAF file already has channel order information!"); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } switch (caf_channel_layout->mChannelLayoutTag) { case kCAFChannelLayoutTag_UseChannelDescriptions: { CAFChannelDescription *descriptions = (CAFChannelDescription *) (caf_channel_layout + 1); int num_descriptions = caf_channel_layout->mNumberChannelDescriptions; int label, cindex = 0, idents = 0; if (caf_chunk_header.mChunkSize != sizeof (CAFChannelLayout) + sizeof (CAFChannelDescription) * num_descriptions || num_descriptions != config->num_channels) { error_line ("channel descriptions in 'chan' chunk are the wrong size!"); free (caf_channel_layout); return WAVPACK_SOFT_ERROR; } if (num_descriptions >= 256) { error_line ("%d channel descriptions is more than we can handle...ignoring!"); break; } channel_reorder = malloc (num_descriptions); memset (channel_reorder, -1, num_descriptions); channel_identities = malloc (num_descriptions+1); for (i = 0; i < num_descriptions; ++i) { WavpackBigEndianToNative (descriptions + i, CAFChannelDescriptionFormat); if (debug_logging_mode) error_line ("chan %d --> %d", i + 1, descriptions [i].mChannelLabel); } for (label = 1; label <= 18; ++label) for (i 
= 0; i < num_descriptions; ++i) if (descriptions [i].mChannelLabel == label) { config->channel_mask |= 1 << (label - 1); channel_reorder [i] = cindex++; break; } for (i = 0; i < num_descriptions; ++i) if (channel_reorder [i] == (unsigned char) -1) { uint32_t clabel = descriptions [i].mChannelLabel; if (clabel == 0 || clabel == 0xffffffff || clabel == 100) channel_identities [idents++] = 0xff; else if ((clabel >= 33 && clabel <= 44) || (clabel >= 200 && clabel <= 207) || (clabel >= 301 && clabel <= 305)) channel_identities [idents++] = clabel >= 301 ? clabel - 80 : clabel; else { error_line ("warning: unknown channel descriptions label: %d", clabel); channel_identities [idents++] = 0xff; } channel_reorder [i] = cindex++; } for (i = 0; i < num_descriptions; ++i) if (channel_reorder [i] != i) break; if (i == num_descriptions) { free (channel_reorder); // no reordering required, so don't channel_reorder = NULL; } else { config->qmode |= QMODE_REORDERED_CHANS; // reordering required, put channel count into layout channel_layout = num_descriptions; } if (!idents) { // if no non-MS channels, free the identities string free (channel_identities); channel_identities = NULL; } else channel_identities [idents] = 0; // otherwise NULL terminate it if (debug_logging_mode) { error_line ("layout_tag = 0x%08x, so generated bitmap of 0x%08x from %d descriptions, %d non-MS", caf_channel_layout->mChannelLayoutTag, config->channel_mask, caf_channel_layout->mNumberChannelDescriptions, idents); if (channel_reorder && num_descriptions <= 8) { char reorder_string [] = "12345678"; for (i = 0; i < num_descriptions; ++i) reorder_string [i] = channel_reorder [i] + '1'; reorder_string [i] = 0; error_line ("reordering string = \"%s\"\n", reorder_string); } } } break; case kCAFChannelLayoutTag_UseChannelBitmap: config->channel_mask = caf_channel_layout->mChannelBitmap; if (debug_logging_mode) error_line ("layout_tag = 0x%08x, so using supplied bitmap of 0x%08x", caf_channel_layout->mChannelLayoutTag, caf_channel_layout->mChannelBitmap); break; default: for (i = 0; i < NUM_LAYOUTS; ++i) if (caf_channel_layout->mChannelLayoutTag == layouts [i].mChannelLayoutTag) { config->channel_mask = layouts [i].mChannelBitmap; channel_layout = layouts [i].mChannelLayoutTag; if (layouts [i].mChannelReorder) { channel_reorder = (unsigned char *) strdup (layouts [i].mChannelReorder); config->qmode |= QMODE_REORDERED_CHANS; } if (layouts [i].mChannelIdentities) channel_identities = (unsigned char *) strdup (layouts [i].mChannelIdentities); if (debug_logging_mode) error_line ("layout_tag 0x%08x found in table, bitmap = 0x%08x, reorder = %s, identities = %s", channel_layout, config->channel_mask, channel_reorder ? "yes" : "no", channel_identities ? 
"yes" : "no"); break; } if (i == NUM_LAYOUTS && debug_logging_mode) error_line ("layout_tag 0x%08x not found in table...all channels unassigned", caf_channel_layout->mChannelLayoutTag); break; } free (caf_channel_layout); } else if (!strncmp (caf_chunk_header.mChunkType, "data", 4)) { // on the data chunk, get size and exit loop uint32_t mEditCount; if (!DoReadFile (infile, &mEditCount, sizeof (mEditCount), &bcount) || bcount != sizeof (mEditCount)) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, &mEditCount, sizeof (mEditCount))) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } if ((config->qmode & QMODE_IGNORE_LENGTH) || caf_chunk_header.mChunkSize == -1) { config->qmode |= QMODE_IGNORE_LENGTH; if (infilesize && DoGetFilePosition (infile) != -1) total_samples = (infilesize - DoGetFilePosition (infile)) / caf_audio_format.mBytesPerPacket; else total_samples = -1; } else { if (infilesize && infilesize - caf_chunk_header.mChunkSize > 16777216) { error_line (".CAF file %s has over 16 MB of extra CAFF data, probably is corrupt!", infilename); return WAVPACK_SOFT_ERROR; } if ((caf_chunk_header.mChunkSize - 4) % caf_audio_format.mBytesPerPacket) { error_line (".CAF file %s has an invalid data chunk size, probably is corrupt!", infilename); return WAVPACK_SOFT_ERROR; } total_samples = (caf_chunk_header.mChunkSize - 4) / caf_audio_format.mBytesPerPacket; if (!total_samples) { error_line ("this .CAF file has no audio samples, probably is corrupt!"); return WAVPACK_SOFT_ERROR; } if (total_samples > MAX_WAVPACK_SAMPLES) { error_line ("%s has too many samples for WavPack!", infilename); return WAVPACK_SOFT_ERROR; } } break; } else { // just copy unknown chunks to output file uint32_t bytes_to_copy = (uint32_t) caf_chunk_header.mChunkSize; char *buff; if (caf_chunk_header.mChunkSize < 0 || caf_chunk_header.mChunkSize > 1048576) { error_line ("%s is not a valid .CAF file!", infilename); return WAVPACK_SOFT_ERROR; } buff = malloc (bytes_to_copy); if (debug_logging_mode) error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes", caf_chunk_header.mChunkType [0], caf_chunk_header.mChunkType [1], caf_chunk_header.mChunkType [2], caf_chunk_header.mChunkType [3], caf_chunk_header.mChunkSize); if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) || bcount != bytes_to_copy || (!(config->qmode & QMODE_NO_STORE_WRAPPER) && !WavpackAddWrapper (wpc, buff, bytes_to_copy))) { error_line ("%s", WavpackGetErrorMessage (wpc)); free (buff); return WAVPACK_SOFT_ERROR; } free (buff); } } if (!chan_chunk && !config->channel_mask && config->num_channels <= 2 && !(config->qmode & QMODE_CHANS_UNASSIGNED)) config->channel_mask = 0x5 - config->num_channels; if (!WavpackSetConfiguration64 (wpc, config, total_samples, channel_identities)) { error_line ("%s", WavpackGetErrorMessage (wpc)); return WAVPACK_SOFT_ERROR; } if (channel_identities) free (channel_identities); if (channel_layout || channel_reorder) { if (!WavpackSetChannelLayout (wpc, channel_layout, channel_reorder)) { error_line ("problem with setting channel layout (should not happen)"); return WAVPACK_SOFT_ERROR; } if (channel_reorder) free (channel_reorder); } return WAVPACK_NO_ERROR; }
169,319
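The fix above follows a common pattern for CWE-125 in file parsers: a size field read from the file is validated against both a minimum (so later "size minus header" arithmetic cannot wrap) and a sane maximum (so the allocation cannot be driven arbitrarily large) before any malloc or read uses it. A minimal, self-contained C sketch of that pattern — the limits and names below are invented for illustration and are not WavPack's:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_CHAN_CHUNK 12u     /* smallest payload the parser can use */
    #define MAX_CHAN_CHUNK 1024u   /* refuse anything implausibly large   */

    /* Validate an attacker-controlled size field before it is used for
     * allocation or for "size - header" arithmetic. */
    static int chunk_size_ok(uint64_t size)
    {
        return size >= MIN_CHAN_CHUNK && size <= MAX_CHAN_CHUNK;
    }

    static unsigned char *alloc_chunk(uint64_t size)
    {
        if (!chunk_size_ok(size))
            return NULL;                  /* reject before malloc/read */
        return malloc((size_t) size);
    }

    int main(void)
    {
        uint64_t sizes[] = { 3, 64, 0xFFFFFFFFu };
        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
            unsigned char *p = alloc_chunk(sizes[i]);
            printf("size %llu -> %s\n", (unsigned long long) sizes[i],
                   p ? "accepted" : "rejected");
            free(p);
        }
        return 0;
    }

Without the lower bound, a chunk size smaller than the fixed part of the structure makes later subtraction wrap around; without the upper bound, the allocation itself becomes a lever for the "bad allocs" the commit message mentions.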
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long) ctx; struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X); if (nla) return (void *) nla - (void *) skb->data; return 0; } Commit Message: filter: prevent nla extensions to peek beyond the end of the message The BPF_S_ANC_NLATTR and BPF_S_ANC_NLATTR_NEST extensions fail to check for a minimal message length before testing the supplied offset to be within the bounds of the message. This allows the subtraction of the nla header to underflow and therefore -- as the data type is unsigned -- allowing far to big offset and length values for the search of the netlink attribute. The remainder calculation for the BPF_S_ANC_NLATTR_NEST extension is also wrong. It has the minuend and subtrahend mixed up, therefore calculates a huge length value, allowing to overrun the end of the message while looking for the netlink attribute. The following three BPF snippets will trigger the bugs when attached to a UNIX datagram socket and parsing a message with length 1, 2 or 3. ,-[ PoC for missing size check in BPF_S_ANC_NLATTR ]-- | ld #0x87654321 | ldx #42 | ld #nla | ret a `--- ,-[ PoC for the same bug in BPF_S_ANC_NLATTR_NEST ]-- | ld #0x87654321 | ldx #42 | ld #nlan | ret a `--- ,-[ PoC for wrong remainder calculation in BPF_S_ANC_NLATTR_NEST ]-- | ; (needs a fake netlink header at offset 0) | ld #0 | ldx #42 | ld #nlan | ret a `--- Fix the first issue by ensuring the message length fulfills the minimal size constrains of a nla header. Fix the second bug by getting the math for the remainder calculation right. Fixes: 4738c1db15 ("[SKFILTER]: Add SKF_ADF_NLATTR instruction") Fixes: d214c7537b ("filter: add SKF_AD_NLATTR_NEST to look for nested..") Cc: Patrick McHardy <[email protected]> Cc: Pablo Neira Ayuso <[email protected]> Signed-off-by: Mathias Krause <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-189
static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long) ctx; struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (skb->len < sizeof(struct nlattr)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X); if (nla) return (void *) nla - (void *) skb->data; return 0; }
166,383
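The one-line fix — bailing out when skb->len is smaller than sizeof(struct nlattr) — guards against an unsigned underflow. A tiny, standalone C program (ordinary user-space code, not kernel code) makes the failure mode easy to see:

    #include <stdio.h>

    struct nlattr { unsigned short nla_len; unsigned short nla_type; };

    int main(void)
    {
        unsigned int skb_len = 1;     /* packet shorter than one header */
        unsigned int A = 42;          /* attacker-chosen offset         */

        /* Wrong order of checks: skb_len - sizeof(struct nlattr) wraps. */
        unsigned int limit = skb_len - (unsigned int) sizeof(struct nlattr);
        printf("bogus limit: %u\n", limit);   /* huge value */
        printf("offset rejected? %s\n",
               A > limit ? "yes" : "no (bogus bound lets it through)");

        /* Correct order: reject short packets first. */
        if (skb_len < sizeof(struct nlattr))
            printf("rejected: packet too short for an nlattr header\n");
        return 0;
    }

With a 1-byte packet the subtraction yields a value near UINT_MAX, so the far-too-large offset 42 sails past the bound check — exactly the behaviour the commit message describes.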
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: eval_js(WebKitWebView * web_view, gchar *script, GString *result) { WebKitWebFrame *frame; JSGlobalContextRef context; JSObjectRef globalobject; JSStringRef var_name; JSStringRef js_script; JSValueRef js_result; JSStringRef js_result_string; size_t js_result_size; js_init(); frame = webkit_web_view_get_main_frame(WEBKIT_WEB_VIEW(web_view)); context = webkit_web_frame_get_global_context(frame); globalobject = JSContextGetGlobalObject(context); /* uzbl javascript namespace */ var_name = JSStringCreateWithUTF8CString("Uzbl"); JSObjectSetProperty(context, globalobject, var_name, JSObjectMake(context, uzbl.js.classref, NULL), kJSClassAttributeNone, NULL); /* evaluate the script and get return value*/ js_script = JSStringCreateWithUTF8CString(script); js_result = JSEvaluateScript(context, js_script, globalobject, NULL, 0, NULL); if (js_result && !JSValueIsUndefined(context, js_result)) { js_result_string = JSValueToStringCopy(context, js_result, NULL); js_result_size = JSStringGetMaximumUTF8CStringSize(js_result_string); if (js_result_size) { char js_result_utf8[js_result_size]; JSStringGetUTF8CString(js_result_string, js_result_utf8, js_result_size); g_string_assign(result, js_result_utf8); } JSStringRelease(js_result_string); } /* cleanup */ JSObjectDeleteProperty(context, globalobject, var_name, NULL); JSStringRelease(var_name); JSStringRelease(js_script); } Commit Message: disable Uzbl javascript object because of security problem. CWE ID: CWE-264
eval_js(WebKitWebView * web_view, gchar *script, GString *result) { WebKitWebFrame *frame; JSGlobalContextRef context; JSObjectRef globalobject; JSStringRef js_script; JSValueRef js_result; JSStringRef js_result_string; size_t js_result_size; js_init(); frame = webkit_web_view_get_main_frame(WEBKIT_WEB_VIEW(web_view)); context = webkit_web_frame_get_global_context(frame); globalobject = JSContextGetGlobalObject(context); /* evaluate the script and get return value*/ js_script = JSStringCreateWithUTF8CString(script); js_result = JSEvaluateScript(context, js_script, globalobject, NULL, 0, NULL); if (js_result && !JSValueIsUndefined(context, js_result)) { js_result_string = JSValueToStringCopy(context, js_result, NULL); js_result_size = JSStringGetMaximumUTF8CStringSize(js_result_string); if (js_result_size) { char js_result_utf8[js_result_size]; JSStringGetUTF8CString(js_result_string, js_result_utf8, js_result_size); g_string_assign(result, js_result_utf8); } JSStringRelease(js_result_string); } /* cleanup */ JSStringRelease(js_script); }
165,523
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int is_integer(char *string) { if (isdigit(string[0]) || string[0] == '-' || string[0] == '+') { while (*++string && isdigit(*string)) ; /* deliberately empty */ if (!*string) return 1; } return 0; } Commit Message: Security fixes. - Don't overflow the small cs_start buffer (reported by Niels Thykier via the debian tracker (Jakub Wilk), found with a fuzzer ("American fuzzy lop")). - Cast arguments to <ctype.h> functions to unsigned char. CWE ID: CWE-119
static int is_integer(char *string) { if (isdigit((unsigned char) string[0]) || string[0] == '-' || string[0] == '+') { while (*++string && isdigit((unsigned char) *string)) ; /* deliberately empty */ if (!*string) return 1; } return 0; }
166,621
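The cast added in this fix matters because the <ctype.h> functions take an int whose value must be representable as an unsigned char (or be EOF); passing a plain char that happens to be negative is undefined behaviour and, with table-based implementations, an out-of-bounds table read. A small illustrative C program using the same safe idiom as the fixed function:

    #include <ctype.h>
    #include <stdio.h>

    /* Returns 1 if the string is a decimal integer, optionally signed. */
    static int is_integer(const char *s)
    {
        if (isdigit((unsigned char) s[0]) || s[0] == '-' || s[0] == '+') {
            while (*++s && isdigit((unsigned char) *s))
                ;                 /* deliberately empty */
            if (!*s)
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        /* "\xC3\xA9" is a UTF-8 'e-acute'; on platforms where char is
         * signed those bytes are negative, so without the cast isdigit()
         * would be handed a negative argument. */
        printf("%d %d %d\n",
               is_integer("123"), is_integer("-7"), is_integer("12\xC3\xA9"));
        return 0;
    }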
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void RunInvTxfm(int16_t *out, uint8_t *dst, int stride) { inv_txfm_(out, dst, stride, tx_type_); } Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec CWE ID: CWE-119
void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) { inv_txfm_(out, dst, stride, tx_type_); }
174,525
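The substantive change in this record is the coefficient type: int16_t becomes tran_low_t, which libvpx defines as a 32-bit integer when high-bit-depth coding is enabled, so intermediate transform values no longer have to fit in 16 bits. A short C illustration of why the narrower type is risky — the value here is made up for demonstration, not real codec output:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t coeff = 40000;            /* plausible magnitude for 10/12-bit content  */
        int16_t narrow = (int16_t) coeff; /* does not fit: typically wraps to -25536    */
                                          /* (out-of-range signed conversion is         */
                                          /*  implementation-defined)                   */
        printf("32-bit coefficient: %d\n", coeff);
        printf("after narrowing to int16_t: %d\n", narrow);
        return 0;
    }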
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans, const Id& id, const Cryptographer* cryptographer, StatusController* status) { MutableEntry entry(trans, syncable::GET_BY_ID, id); CHECK(entry.good()); if (!entry.Get(syncable::IS_UNAPPLIED_UPDATE) || !entry.Get(syncable::IS_UNSYNCED)) { return NO_SYNC_PROGRESS; } if (entry.Get(syncable::IS_DEL) && entry.Get(syncable::SERVER_IS_DEL)) { entry.Put(syncable::IS_UNSYNCED, false); entry.Put(syncable::IS_UNAPPLIED_UPDATE, false); return NO_SYNC_PROGRESS; } if (!entry.Get(syncable::SERVER_IS_DEL)) { bool name_matches = entry.Get(syncable::NON_UNIQUE_NAME) == entry.Get(syncable::SERVER_NON_UNIQUE_NAME); bool parent_matches = entry.Get(syncable::PARENT_ID) == entry.Get(syncable::SERVER_PARENT_ID); bool entry_deleted = entry.Get(syncable::IS_DEL); syncable::Id server_prev_id = entry.ComputePrevIdFromServerPosition( entry.Get(syncable::SERVER_PARENT_ID)); bool needs_reinsertion = !parent_matches || server_prev_id != entry.Get(syncable::PREV_ID); DVLOG_IF(1, needs_reinsertion) << "Insertion needed, server prev id " << " is " << server_prev_id << ", local prev id is " << entry.Get(syncable::PREV_ID); const sync_pb::EntitySpecifics& specifics = entry.Get(syncable::SPECIFICS); const sync_pb::EntitySpecifics& server_specifics = entry.Get(syncable::SERVER_SPECIFICS); const sync_pb::EntitySpecifics& base_server_specifics = entry.Get(syncable::BASE_SERVER_SPECIFICS); std::string decrypted_specifics, decrypted_server_specifics; bool specifics_match = false; bool server_encrypted_with_default_key = false; if (specifics.has_encrypted()) { DCHECK(cryptographer->CanDecryptUsingDefaultKey(specifics.encrypted())); decrypted_specifics = cryptographer->DecryptToString( specifics.encrypted()); } else { decrypted_specifics = specifics.SerializeAsString(); } if (server_specifics.has_encrypted()) { server_encrypted_with_default_key = cryptographer->CanDecryptUsingDefaultKey( server_specifics.encrypted()); decrypted_server_specifics = cryptographer->DecryptToString( server_specifics.encrypted()); } else { decrypted_server_specifics = server_specifics.SerializeAsString(); } if (decrypted_server_specifics == decrypted_specifics && server_encrypted_with_default_key == specifics.has_encrypted()) { specifics_match = true; } bool base_server_specifics_match = false; if (server_specifics.has_encrypted() && IsRealDataType(GetModelTypeFromSpecifics(base_server_specifics))) { std::string decrypted_base_server_specifics; if (!base_server_specifics.has_encrypted()) { decrypted_base_server_specifics = base_server_specifics.SerializeAsString(); } else { decrypted_base_server_specifics = cryptographer->DecryptToString( base_server_specifics.encrypted()); } if (decrypted_server_specifics == decrypted_base_server_specifics) base_server_specifics_match = true; } if (entry.GetModelType() == syncable::NIGORI) { sync_pb::EntitySpecifics specifics = entry.Get(syncable::SERVER_SPECIFICS); sync_pb::NigoriSpecifics* server_nigori = specifics.mutable_nigori(); cryptographer->UpdateNigoriFromEncryptedTypes(server_nigori); if (cryptographer->is_ready()) { cryptographer->GetKeys(server_nigori->mutable_encrypted()); server_nigori->set_using_explicit_passphrase( entry.Get(syncable::SPECIFICS).nigori(). 
using_explicit_passphrase()); } if (entry.Get(syncable::SPECIFICS).nigori().sync_tabs()) { server_nigori->set_sync_tabs(true); } entry.Put(syncable::SPECIFICS, specifics); DVLOG(1) << "Resolving simple conflict, merging nigori nodes: " << entry; status->increment_num_server_overwrites(); OverwriteServerChanges(trans, &entry); UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", NIGORI_MERGE, CONFLICT_RESOLUTION_SIZE); } else if (!entry_deleted && name_matches && parent_matches && specifics_match && !needs_reinsertion) { DVLOG(1) << "Resolving simple conflict, everything matches, ignoring " << "changes for: " << entry; OverwriteServerChanges(trans, &entry); IgnoreLocalChanges(&entry); UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", CHANGES_MATCH, CONFLICT_RESOLUTION_SIZE); } else if (base_server_specifics_match) { DVLOG(1) << "Resolving simple conflict, ignoring server encryption " << " changes for: " << entry; status->increment_num_server_overwrites(); OverwriteServerChanges(trans, &entry); UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", IGNORE_ENCRYPTION, CONFLICT_RESOLUTION_SIZE); } else if (entry_deleted || !name_matches || !parent_matches) { OverwriteServerChanges(trans, &entry); status->increment_num_server_overwrites(); DVLOG(1) << "Resolving simple conflict, overwriting server changes " << "for: " << entry; UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", OVERWRITE_SERVER, CONFLICT_RESOLUTION_SIZE); } else { DVLOG(1) << "Resolving simple conflict, ignoring local changes for: " << entry; IgnoreLocalChanges(&entry); status->increment_num_local_overwrites(); UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", OVERWRITE_LOCAL, CONFLICT_RESOLUTION_SIZE); } entry.Put(syncable::BASE_SERVER_SPECIFICS, sync_pb::EntitySpecifics()); return SYNC_PROGRESS; } else { // SERVER_IS_DEL is true if (entry.Get(syncable::IS_DIR)) { Directory::ChildHandles children; trans->directory()->GetChildHandlesById(trans, entry.Get(syncable::ID), &children); if (0 != children.size()) { DVLOG(1) << "Entry is a server deleted directory with local contents, " << "should be a hierarchy conflict. (race condition)."; return NO_SYNC_PROGRESS; } } if (!entry.Get(syncable::UNIQUE_CLIENT_TAG).empty()) { DCHECK_EQ(entry.Get(syncable::SERVER_VERSION), 0) << "For the server to " "know to re-create, client-tagged items should revert to version 0 " "when server-deleted."; OverwriteServerChanges(trans, &entry); status->increment_num_server_overwrites(); DVLOG(1) << "Resolving simple conflict, undeleting server entry: " << entry; UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", OVERWRITE_SERVER, CONFLICT_RESOLUTION_SIZE); entry.Put(syncable::SERVER_VERSION, 0); entry.Put(syncable::BASE_VERSION, 0); } else { SyncerUtil::SplitServerInformationIntoNewEntry(trans, &entry); MutableEntry server_update(trans, syncable::GET_BY_ID, id); CHECK(server_update.good()); CHECK(server_update.Get(syncable::META_HANDLE) != entry.Get(syncable::META_HANDLE)) << server_update << entry; UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", UNDELETE, CONFLICT_RESOLUTION_SIZE); } return SYNC_PROGRESS; } } Commit Message: [Sync] Cleanup all tab sync enabling logic now that its on by default. BUG=none TEST= Review URL: https://chromiumcodereview.appspot.com/10443046 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139462 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-362
ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans, const Id& id, const Cryptographer* cryptographer, StatusController* status) { MutableEntry entry(trans, syncable::GET_BY_ID, id); CHECK(entry.good()); if (!entry.Get(syncable::IS_UNAPPLIED_UPDATE) || !entry.Get(syncable::IS_UNSYNCED)) { return NO_SYNC_PROGRESS; } if (entry.Get(syncable::IS_DEL) && entry.Get(syncable::SERVER_IS_DEL)) { entry.Put(syncable::IS_UNSYNCED, false); entry.Put(syncable::IS_UNAPPLIED_UPDATE, false); return NO_SYNC_PROGRESS; } if (!entry.Get(syncable::SERVER_IS_DEL)) { bool name_matches = entry.Get(syncable::NON_UNIQUE_NAME) == entry.Get(syncable::SERVER_NON_UNIQUE_NAME); bool parent_matches = entry.Get(syncable::PARENT_ID) == entry.Get(syncable::SERVER_PARENT_ID); bool entry_deleted = entry.Get(syncable::IS_DEL); syncable::Id server_prev_id = entry.ComputePrevIdFromServerPosition( entry.Get(syncable::SERVER_PARENT_ID)); bool needs_reinsertion = !parent_matches || server_prev_id != entry.Get(syncable::PREV_ID); DVLOG_IF(1, needs_reinsertion) << "Insertion needed, server prev id " << " is " << server_prev_id << ", local prev id is " << entry.Get(syncable::PREV_ID); const sync_pb::EntitySpecifics& specifics = entry.Get(syncable::SPECIFICS); const sync_pb::EntitySpecifics& server_specifics = entry.Get(syncable::SERVER_SPECIFICS); const sync_pb::EntitySpecifics& base_server_specifics = entry.Get(syncable::BASE_SERVER_SPECIFICS); std::string decrypted_specifics, decrypted_server_specifics; bool specifics_match = false; bool server_encrypted_with_default_key = false; if (specifics.has_encrypted()) { DCHECK(cryptographer->CanDecryptUsingDefaultKey(specifics.encrypted())); decrypted_specifics = cryptographer->DecryptToString( specifics.encrypted()); } else { decrypted_specifics = specifics.SerializeAsString(); } if (server_specifics.has_encrypted()) { server_encrypted_with_default_key = cryptographer->CanDecryptUsingDefaultKey( server_specifics.encrypted()); decrypted_server_specifics = cryptographer->DecryptToString( server_specifics.encrypted()); } else { decrypted_server_specifics = server_specifics.SerializeAsString(); } if (decrypted_server_specifics == decrypted_specifics && server_encrypted_with_default_key == specifics.has_encrypted()) { specifics_match = true; } bool base_server_specifics_match = false; if (server_specifics.has_encrypted() && IsRealDataType(GetModelTypeFromSpecifics(base_server_specifics))) { std::string decrypted_base_server_specifics; if (!base_server_specifics.has_encrypted()) { decrypted_base_server_specifics = base_server_specifics.SerializeAsString(); } else { decrypted_base_server_specifics = cryptographer->DecryptToString( base_server_specifics.encrypted()); } if (decrypted_server_specifics == decrypted_base_server_specifics) base_server_specifics_match = true; } if (entry.GetModelType() == syncable::NIGORI) { sync_pb::EntitySpecifics specifics = entry.Get(syncable::SERVER_SPECIFICS); sync_pb::NigoriSpecifics* server_nigori = specifics.mutable_nigori(); cryptographer->UpdateNigoriFromEncryptedTypes(server_nigori); if (cryptographer->is_ready()) { cryptographer->GetKeys(server_nigori->mutable_encrypted()); server_nigori->set_using_explicit_passphrase( entry.Get(syncable::SPECIFICS).nigori(). 
using_explicit_passphrase()); } entry.Put(syncable::SPECIFICS, specifics); DVLOG(1) << "Resolving simple conflict, merging nigori nodes: " << entry; status->increment_num_server_overwrites(); OverwriteServerChanges(trans, &entry); UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", NIGORI_MERGE, CONFLICT_RESOLUTION_SIZE); } else if (!entry_deleted && name_matches && parent_matches && specifics_match && !needs_reinsertion) { DVLOG(1) << "Resolving simple conflict, everything matches, ignoring " << "changes for: " << entry; OverwriteServerChanges(trans, &entry); IgnoreLocalChanges(&entry); UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", CHANGES_MATCH, CONFLICT_RESOLUTION_SIZE); } else if (base_server_specifics_match) { DVLOG(1) << "Resolving simple conflict, ignoring server encryption " << " changes for: " << entry; status->increment_num_server_overwrites(); OverwriteServerChanges(trans, &entry); UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", IGNORE_ENCRYPTION, CONFLICT_RESOLUTION_SIZE); } else if (entry_deleted || !name_matches || !parent_matches) { OverwriteServerChanges(trans, &entry); status->increment_num_server_overwrites(); DVLOG(1) << "Resolving simple conflict, overwriting server changes " << "for: " << entry; UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", OVERWRITE_SERVER, CONFLICT_RESOLUTION_SIZE); } else { DVLOG(1) << "Resolving simple conflict, ignoring local changes for: " << entry; IgnoreLocalChanges(&entry); status->increment_num_local_overwrites(); UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", OVERWRITE_LOCAL, CONFLICT_RESOLUTION_SIZE); } entry.Put(syncable::BASE_SERVER_SPECIFICS, sync_pb::EntitySpecifics()); return SYNC_PROGRESS; } else { // SERVER_IS_DEL is true if (entry.Get(syncable::IS_DIR)) { Directory::ChildHandles children; trans->directory()->GetChildHandlesById(trans, entry.Get(syncable::ID), &children); if (0 != children.size()) { DVLOG(1) << "Entry is a server deleted directory with local contents, " << "should be a hierarchy conflict. (race condition)."; return NO_SYNC_PROGRESS; } } if (!entry.Get(syncable::UNIQUE_CLIENT_TAG).empty()) { DCHECK_EQ(entry.Get(syncable::SERVER_VERSION), 0) << "For the server to " "know to re-create, client-tagged items should revert to version 0 " "when server-deleted."; OverwriteServerChanges(trans, &entry); status->increment_num_server_overwrites(); DVLOG(1) << "Resolving simple conflict, undeleting server entry: " << entry; UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", OVERWRITE_SERVER, CONFLICT_RESOLUTION_SIZE); entry.Put(syncable::SERVER_VERSION, 0); entry.Put(syncable::BASE_VERSION, 0); } else { SyncerUtil::SplitServerInformationIntoNewEntry(trans, &entry); MutableEntry server_update(trans, syncable::GET_BY_ID, id); CHECK(server_update.good()); CHECK(server_update.Get(syncable::META_HANDLE) != entry.Get(syncable::META_HANDLE)) << server_update << entry; UMA_HISTOGRAM_ENUMERATION("Sync.ResolveSimpleConflict", UNDELETE, CONFLICT_RESOLUTION_SIZE); } return SYNC_PROGRESS; } }
170,791
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: stf_status ikev2parent_inI2outR2(struct msg_digest *md) { struct state *st = md->st; /* struct connection *c = st->st_connection; */ /* * the initiator sent us an encrypted payload. We need to calculate * our g^xy, and skeyseed values, and then decrypt the payload. */ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inI2outR2: calculating g^{xy} in order to decrypt I2")); /* verify that there is in fact an encrypted payload */ if (!md->chain[ISAKMP_NEXT_v2E]) { libreswan_log("R2 state should receive an encrypted payload"); reset_globals(); return STF_FATAL; } /* now. we need to go calculate the g^xy */ { struct dh_continuation *dh = alloc_thing( struct dh_continuation, "ikev2_inI2outR2 KE"); stf_status e; dh->md = md; set_suspended(st, dh->md); pcrc_init(&dh->dh_pcrc); dh->dh_pcrc.pcrc_func = ikev2_parent_inI2outR2_continue; e = start_dh_v2(&dh->dh_pcrc, st, st->st_import, RESPONDER, st->st_oakley.groupnum); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } reset_globals(); return e; } } Commit Message: SECURITY: Properly handle IKEv2 I1 notification packet without KE payload CWE ID: CWE-20
stf_status ikev2parent_inI2outR2(struct msg_digest *md) { struct state *st = md->st; /* struct connection *c = st->st_connection; */ /* * the initiator sent us an encrypted payload. We need to calculate * our g^xy, and skeyseed values, and then decrypt the payload. */ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inI2outR2: calculating g^{xy} in order to decrypt I2")); /* verify that there is in fact an encrypted payload */ if (!md->chain[ISAKMP_NEXT_v2E]) { libreswan_log("R2 state should receive an encrypted payload"); reset_globals(); /* XXX suspicious - why was this deemed neccessary? */ return STF_FATAL; } /* now. we need to go calculate the g^xy */ { struct dh_continuation *dh = alloc_thing( struct dh_continuation, "ikev2_inI2outR2 KE"); stf_status e; dh->md = md; set_suspended(st, dh->md); pcrc_init(&dh->dh_pcrc); dh->dh_pcrc.pcrc_func = ikev2_parent_inI2outR2_continue; e = start_dh_v2(&dh->dh_pcrc, st, st->st_import, RESPONDER, st->st_oakley.groupnum); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } reset_globals(); return e; } }
166,475
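The commit title points at the underlying issue: an IKEv2 exchange was processed without first confirming that the required KE payload was present, so later code could act on a missing chain entry. The shape of that defensive check, in a deliberately simplified C sketch — the types and field names are invented for illustration, not libreswan's:

    #include <stdio.h>
    #include <stddef.h>

    struct payload { int type; const unsigned char *data; size_t len; };

    struct msg_digest {
        struct payload *chain_ke;     /* NULL when the peer omitted the payload */
        struct payload *chain_nonce;
    };

    /* Returns 0 on success, -1 if a mandatory payload is missing. */
    static int check_required_payloads(const struct msg_digest *md)
    {
        if (md->chain_ke == NULL) {
            fprintf(stderr, "rejecting exchange: KE payload missing\n");
            return -1;
        }
        if (md->chain_nonce == NULL) {
            fprintf(stderr, "rejecting exchange: Ni payload missing\n");
            return -1;
        }
        return 0;   /* only now is it safe to start the DH computation */
    }

    int main(void)
    {
        struct msg_digest bad = { NULL, NULL };
        return check_required_payloads(&bad) == 0 ? 0 : 1;
    }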
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: gss_wrap_size_limit(OM_uint32 *minor_status, gss_ctx_id_t context_handle, int conf_req_flag, gss_qop_t qop_req, OM_uint32 req_output_size, OM_uint32 *max_input_size) { gss_union_ctx_id_t ctx; gss_mechanism mech; OM_uint32 major_status; if (minor_status == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); *minor_status = 0; if (context_handle == GSS_C_NO_CONTEXT) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); if (max_input_size == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; mech = gssint_get_mechanism (ctx->mech_type); if (!mech) return (GSS_S_BAD_MECH); if (mech->gss_wrap_size_limit) major_status = mech->gss_wrap_size_limit(minor_status, ctx->internal_ctx_id, conf_req_flag, qop_req, req_output_size, max_input_size); else if (mech->gss_wrap_iov_length) major_status = gssint_wrap_size_limit_iov_shim(mech, minor_status, ctx->internal_ctx_id, conf_req_flag, qop_req, req_output_size, max_input_size); else major_status = GSS_S_UNAVAILABLE; if (major_status != GSS_S_COMPLETE) map_error(minor_status, mech); return major_status; } Commit Message: Preserve GSS context on init/accept failure After gss_init_sec_context() or gss_accept_sec_context() has created a context, don't delete the mechglue context on failures from subsequent calls, even if the mechanism deletes the mech-specific context (which is allowed by RFC 2744 but not preferred). Check for union contexts with no mechanism context in each GSS function which accepts a gss_ctx_id_t. CVE-2017-11462: RFC 2744 permits a GSS-API implementation to delete an existing security context on a second or subsequent call to gss_init_sec_context() or gss_accept_sec_context() if the call results in an error. This API behavior has been found to be dangerous, leading to the possibility of memory errors in some callers. For safety, GSS-API implementations should instead preserve existing security contexts on error until the caller deletes them. All versions of MIT krb5 prior to this change may delete acceptor contexts on error. Versions 1.13.4 through 1.13.7, 1.14.1 through 1.14.5, and 1.15 through 1.15.1 may also delete initiator contexts on error. ticket: 8598 (new) target_version: 1.15-next target_version: 1.14-next tags: pullup CWE ID: CWE-415
gss_wrap_size_limit(OM_uint32 *minor_status, gss_ctx_id_t context_handle, int conf_req_flag, gss_qop_t qop_req, OM_uint32 req_output_size, OM_uint32 *max_input_size) { gss_union_ctx_id_t ctx; gss_mechanism mech; OM_uint32 major_status; if (minor_status == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); *minor_status = 0; if (context_handle == GSS_C_NO_CONTEXT) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); if (max_input_size == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (!mech) return (GSS_S_BAD_MECH); if (mech->gss_wrap_size_limit) major_status = mech->gss_wrap_size_limit(minor_status, ctx->internal_ctx_id, conf_req_flag, qop_req, req_output_size, max_input_size); else if (mech->gss_wrap_iov_length) major_status = gssint_wrap_size_limit_iov_shim(mech, minor_status, ctx->internal_ctx_id, conf_req_flag, qop_req, req_output_size, max_input_size); else major_status = GSS_S_UNAVAILABLE; if (major_status != GSS_S_COMPLETE) map_error(minor_status, mech); return major_status; }
168,021
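The added guard — returning GSS_S_NO_CONTEXT when the union context exists but its mechanism-specific context is gone — reflects the defense the commit message describes: keep the outer handle alive after a failure and have every entry point check for the hollowed-out state instead of touching freed memory. A compact C sketch of that handle pattern, with illustrative names rather than the real GSS-API types:

    #include <stdio.h>
    #include <stdlib.h>

    struct mech_ctx { int dummy; };

    /* Union handle that outlives failures of the underlying mechanism. */
    struct union_ctx {
        struct mech_ctx *internal;   /* NULL once the mechanism context is gone */
    };

    enum status { OK = 0, ERR_NO_CONTEXT = 1 };

    static enum status ctx_wrap_size_limit(struct union_ctx *ctx, size_t *out)
    {
        if (ctx == NULL || ctx->internal == NULL)
            return ERR_NO_CONTEXT;   /* defunct handle: report, don't dereference */
        *out = 1024;                 /* stand-in for the real per-mech computation */
        return OK;
    }

    static void ctx_fail(struct union_ctx *ctx)
    {
        /* On error the mechanism context is freed, but the union handle
         * stays valid until the caller explicitly deletes it. */
        free(ctx->internal);
        ctx->internal = NULL;
    }

    int main(void)
    {
        struct union_ctx ctx = { malloc(sizeof(struct mech_ctx)) };
        size_t n;
        ctx_fail(&ctx);
        printf("status after failure: %d\n", (int) ctx_wrap_size_limit(&ctx, &n));
        return 0;
    }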
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void RenderWidgetHostViewAura::AdjustSurfaceProtection() { bool surface_is_protected = current_surface_ || !host_->is_hidden() || (current_surface_is_protected_ && (pending_thumbnail_tasks_ > 0 || current_surface_in_use_by_compositor_)); if (current_surface_is_protected_ == surface_is_protected) return; current_surface_is_protected_ = surface_is_protected; ++protection_state_id_; if (!surface_route_id_ || !shared_surface_handle_.parent_gpu_process_id) return; RenderWidgetHostImpl::SendFrontSurfaceIsProtected( surface_is_protected, protection_state_id_, surface_route_id_, shared_surface_handle_.parent_gpu_process_id); } Commit Message: Implement TextureImageTransportSurface using texture mailbox This has a couple of advantages: - allow tearing down and recreating the UI parent context without losing the renderer contexts - do not require a context to be able to generate textures when creating the GLSurfaceHandle - clearer ownership semantics that potentially allows for more robust and easier lost context handling/thumbnailing/etc., since a texture is at any given time owned by either: UI parent, mailbox, or TextureImageTransportSurface - simplify frontbuffer protection logic; the frontbuffer textures are now owned by RWHV where they are refcounted The TextureImageTransportSurface informs RenderWidgetHostView of the mailbox names for the front- and backbuffer textures by associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message. During SwapBuffers() or PostSubBuffer() cycles, it then uses produceTextureCHROMIUM() and consumeTextureCHROMIUM() to transfer ownership between renderer and browser compositor. RWHV sends back the surface_handle of the buffer being returned with the Swap ACK (or 0 if no buffer is being returned in which case TextureImageTransportSurface will allocate a new texture - note that this could be used to simply keep textures for thumbnailing). BUG=154815,139616 [email protected] Review URL: https://chromiumcodereview.appspot.com/11194042 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
void RenderWidgetHostViewAura::SetSurfaceNotInUseByCompositor( scoped_refptr<ui::Texture>) { }
171,376
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int sock_send_fd(int sock_fd, const uint8_t* buf, int len, int send_fd) { ssize_t ret; struct msghdr msg; unsigned char *buffer = (unsigned char *)buf; memset(&msg, 0, sizeof(msg)); struct cmsghdr *cmsg; char msgbuf[CMSG_SPACE(1)]; asrt(send_fd != -1); if(sock_fd == -1 || send_fd == -1) return -1; msg.msg_control = msgbuf; msg.msg_controllen = sizeof msgbuf; cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_len = CMSG_LEN(sizeof send_fd); memcpy(CMSG_DATA(cmsg), &send_fd, sizeof send_fd); int ret_len = len; while (len > 0) { struct iovec iv; memset(&iv, 0, sizeof(iv)); iv.iov_base = buffer; iv.iov_len = len; msg.msg_iov = &iv; msg.msg_iovlen = 1; do { ret = sendmsg(sock_fd, &msg, MSG_NOSIGNAL); } while (ret < 0 && errno == EINTR); if (ret < 0) { BTIF_TRACE_ERROR("fd:%d, send_fd:%d, sendmsg ret:%d, errno:%d, %s", sock_fd, send_fd, (int)ret, errno, strerror(errno)); ret_len = -1; break; } buffer += ret; len -= ret; memset(&msg, 0, sizeof(msg)); } BTIF_TRACE_DEBUG("close fd:%d after sent", send_fd); close(send_fd); return ret_len; } Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release CWE ID: CWE-284
int sock_send_fd(int sock_fd, const uint8_t* buf, int len, int send_fd) { ssize_t ret; struct msghdr msg; unsigned char *buffer = (unsigned char *)buf; memset(&msg, 0, sizeof(msg)); struct cmsghdr *cmsg; char msgbuf[CMSG_SPACE(1)]; asrt(send_fd != -1); if(sock_fd == -1 || send_fd == -1) return -1; msg.msg_control = msgbuf; msg.msg_controllen = sizeof msgbuf; cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_len = CMSG_LEN(sizeof send_fd); memcpy(CMSG_DATA(cmsg), &send_fd, sizeof send_fd); int ret_len = len; while (len > 0) { struct iovec iv; memset(&iv, 0, sizeof(iv)); iv.iov_base = buffer; iv.iov_len = len; msg.msg_iov = &iv; msg.msg_iovlen = 1; do { ret = TEMP_FAILURE_RETRY(sendmsg(sock_fd, &msg, MSG_NOSIGNAL)); } while (ret < 0 && errno == EINTR); if (ret < 0) { BTIF_TRACE_ERROR("fd:%d, send_fd:%d, sendmsg ret:%d, errno:%d, %s", sock_fd, send_fd, (int)ret, errno, strerror(errno)); ret_len = -1; break; } buffer += ret; len -= ret; memset(&msg, 0, sizeof(msg)); } BTIF_TRACE_DEBUG("close fd:%d after sent", send_fd); close(send_fd); return ret_len; }
173,470
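The fix wraps sendmsg() in TEMP_FAILURE_RETRY, the Bionic/glibc macro that re-issues a call whenever it fails with EINTR, so a signal delivered to the process can no longer abort the transfer mid-way. The retry idiom on its own, in a small C example where write() on a pipe stands in for sendmsg() (the fallback macro below uses the GNU statement-expression form, as glibc's definition does):

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Fallback for platforms without the TEMP_FAILURE_RETRY macro. */
    #ifndef TEMP_FAILURE_RETRY
    #define TEMP_FAILURE_RETRY(expr)                 \
        ({ long _rc;                                 \
           do { _rc = (long) (expr); }               \
           while (_rc == -1 && errno == EINTR);      \
           _rc; })
    #endif

    int main(void)
    {
        int fds[2];
        const char msg[] = "hello";
        if (pipe(fds) != 0)
            return 1;

        /* Interrupted writes are retried instead of being reported as failures. */
        ssize_t n = TEMP_FAILURE_RETRY(write(fds[1], msg, sizeof(msg)));
        printf("wrote %zd bytes\n", n);

        close(fds[0]);
        close(fds[1]);
        return n == (ssize_t) sizeof(msg) ? 0 : 1;
    }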
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } CheckNumberCompactPixels; compact_pixels++; } } return(i); } Commit Message: Moved check for https://github.com/ImageMagick/ImageMagick/issues/92. CWE ID: CWE-125
static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); }
168,806
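The whole fix here is a reordering: CheckNumberCompactPixels now runs before the byte at *compact_pixels is consumed, not after, so the literal-run loop can never read one byte past the compressed buffer. The same check-before-consume rule in a stripped-down RLE literal-run copier — a hypothetical decoder, not ImageMagick's:

    #include <stdio.h>
    #include <stddef.h>

    /* Copies a literal run of run_len bytes from src (src_left bytes remain)
     * into dst (dst_left bytes remain). Returns bytes copied, or -1 on error. */
    static int copy_literal_run(const unsigned char *src, size_t src_left,
                                unsigned char *dst, size_t dst_left,
                                size_t run_len)
    {
        size_t i;

        for (i = 0; i < run_len; i++) {
            if (src_left == 0 || dst_left == 0)   /* check BEFORE consuming */
                return -1;
            *dst++ = *src++;
            src_left--;
            dst_left--;
        }
        return (int) run_len;
    }

    int main(void)
    {
        const unsigned char packed[3] = { 0x10, 0x20, 0x30 };
        unsigned char out[8];

        /* A run length larger than the remaining input is rejected cleanly. */
        printf("%d\n", copy_literal_run(packed, sizeof(packed), out, sizeof(out), 5));
        printf("%d\n", copy_literal_run(packed, sizeof(packed), out, sizeof(out), 3));
        return 0;
    }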
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static rsRetVal createSocket(instanceConf_t* info, void** sock) { int rv; sublist* sub; *sock = zsocket_new(s_context, info->type); if (!sock) { errmsg.LogError(0, RS_RET_INVALID_PARAMS, "zsocket_new failed: %s, for type %d", zmq_strerror(errno),info->type); /* DK: invalid params seems right here */ return RS_RET_INVALID_PARAMS; } DBGPRINTF("imzmq3: socket of type %d created successfully\n", info->type) /* Set options *before* the connect/bind. */ if (info->identity) zsocket_set_identity(*sock, info->identity); if (info->sndBuf > -1) zsocket_set_sndbuf(*sock, info->sndBuf); if (info->rcvBuf > -1) zsocket_set_rcvbuf(*sock, info->rcvBuf); if (info->linger > -1) zsocket_set_linger(*sock, info->linger); if (info->backlog > -1) zsocket_set_backlog(*sock, info->backlog); if (info->sndTimeout > -1) zsocket_set_sndtimeo(*sock, info->sndTimeout); if (info->rcvTimeout > -1) zsocket_set_rcvtimeo(*sock, info->rcvTimeout); if (info->maxMsgSize > -1) zsocket_set_maxmsgsize(*sock, info->maxMsgSize); if (info->rate > -1) zsocket_set_rate(*sock, info->rate); if (info->recoveryIVL > -1) zsocket_set_recovery_ivl(*sock, info->recoveryIVL); if (info->multicastHops > -1) zsocket_set_multicast_hops(*sock, info->multicastHops); if (info->reconnectIVL > -1) zsocket_set_reconnect_ivl(*sock, info->reconnectIVL); if (info->reconnectIVLMax > -1) zsocket_set_reconnect_ivl_max(*sock, info->reconnectIVLMax); if (info->ipv4Only > -1) zsocket_set_ipv4only(*sock, info->ipv4Only); if (info->affinity > -1) zsocket_set_affinity(*sock, info->affinity); if (info->sndHWM > -1 ) zsocket_set_sndhwm(*sock, info->sndHWM); if (info->rcvHWM > -1 ) zsocket_set_rcvhwm(*sock, info->rcvHWM); /* Set subscriptions.*/ if (info->type == ZMQ_SUB) { for(sub = info->subscriptions; sub!=NULL; sub=sub->next) { zsocket_set_subscribe(*sock, sub->subscribe); } } /* Do the bind/connect... */ if (info->action==ACTION_CONNECT) { rv = zsocket_connect(*sock, info->description); if (rv == -1) { errmsg.LogError(0, RS_RET_INVALID_PARAMS, "zmq_connect using %s failed: %s", info->description, zmq_strerror(errno)); return RS_RET_INVALID_PARAMS; } DBGPRINTF("imzmq3: connect for %s successful\n",info->description); } else { rv = zsocket_bind(*sock, info->description); if (rv == -1) { errmsg.LogError(0, RS_RET_INVALID_PARAMS, "zmq_bind using %s failed: %s", info->description, zmq_strerror(errno)); return RS_RET_INVALID_PARAMS; } DBGPRINTF("imzmq3: bind for %s successful\n",info->description); } return RS_RET_OK; } Commit Message: Merge pull request #1565 from Whissi/fix-format-security-issue-in-zmq-modules Fix format security issue in zmq3 modules CWE ID: CWE-134
static rsRetVal createSocket(instanceConf_t* info, void** sock) { int rv; sublist* sub; *sock = zsocket_new(s_context, info->type); if (!sock) { errmsg.LogError(0, RS_RET_INVALID_PARAMS, "zsocket_new failed: %s, for type %d", zmq_strerror(errno),info->type); /* DK: invalid params seems right here */ return RS_RET_INVALID_PARAMS; } DBGPRINTF("imzmq3: socket of type %d created successfully\n", info->type) /* Set options *before* the connect/bind. */ if (info->identity) zsocket_set_identity(*sock, info->identity); if (info->sndBuf > -1) zsocket_set_sndbuf(*sock, info->sndBuf); if (info->rcvBuf > -1) zsocket_set_rcvbuf(*sock, info->rcvBuf); if (info->linger > -1) zsocket_set_linger(*sock, info->linger); if (info->backlog > -1) zsocket_set_backlog(*sock, info->backlog); if (info->sndTimeout > -1) zsocket_set_sndtimeo(*sock, info->sndTimeout); if (info->rcvTimeout > -1) zsocket_set_rcvtimeo(*sock, info->rcvTimeout); if (info->maxMsgSize > -1) zsocket_set_maxmsgsize(*sock, info->maxMsgSize); if (info->rate > -1) zsocket_set_rate(*sock, info->rate); if (info->recoveryIVL > -1) zsocket_set_recovery_ivl(*sock, info->recoveryIVL); if (info->multicastHops > -1) zsocket_set_multicast_hops(*sock, info->multicastHops); if (info->reconnectIVL > -1) zsocket_set_reconnect_ivl(*sock, info->reconnectIVL); if (info->reconnectIVLMax > -1) zsocket_set_reconnect_ivl_max(*sock, info->reconnectIVLMax); if (info->ipv4Only > -1) zsocket_set_ipv4only(*sock, info->ipv4Only); if (info->affinity > -1) zsocket_set_affinity(*sock, info->affinity); if (info->sndHWM > -1 ) zsocket_set_sndhwm(*sock, info->sndHWM); if (info->rcvHWM > -1 ) zsocket_set_rcvhwm(*sock, info->rcvHWM); /* Set subscriptions.*/ if (info->type == ZMQ_SUB) { for(sub = info->subscriptions; sub!=NULL; sub=sub->next) { zsocket_set_subscribe(*sock, sub->subscribe); } } /* Do the bind/connect... */ if (info->action==ACTION_CONNECT) { rv = zsocket_connect(*sock, "%s", info->description); if (rv == -1) { errmsg.LogError(0, RS_RET_INVALID_PARAMS, "zmq_connect using %s failed: %s", info->description, zmq_strerror(errno)); return RS_RET_INVALID_PARAMS; } DBGPRINTF("imzmq3: connect for %s successful\n",info->description); } else { rv = zsocket_bind(*sock, "%s", info->description); if (rv == -1) { errmsg.LogError(0, RS_RET_INVALID_PARAMS, "zmq_bind using %s failed: %s", info->description, zmq_strerror(errno)); return RS_RET_INVALID_PARAMS; } DBGPRINTF("imzmq3: bind for %s successful\n",info->description); } return RS_RET_OK; }
167,983
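The fix is the classic format-string repair: the endpoint description, which comes from user-supplied configuration, is no longer passed where a format string is expected; it is passed as an argument to an explicit "%s". The difference in a minimal C example using printf directly (the string below is an invented stand-in for a configured endpoint):

    #include <stdio.h>

    int main(void)
    {
        /* Imagine this arrived from a config file or the network. */
        const char *untrusted = "tcp://*:5556 %s %n";

        /* Dangerous: untrusted bytes are interpreted as format directives.
         * printf(untrusted);   // %n could even write to memory -- never do this
         */

        /* Safe: the untrusted string is only ever data. */
        printf("%s\n", untrusted);
        return 0;
    }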
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top, unsigned char *buf, int idx, int width) { size_t i, j; if (top > b->top) top = b->top; /* this works because 'buf' is explicitly * zeroed */ for (i = 0, j = idx; i < top * sizeof b->d[0]; i++, j += width) { buf[j] = ((unsigned char *)b->d)[i]; } return 1; unsigned char *buf, int idx, static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top, unsigned char *buf, int idx, int width) { size_t i, j; if (bn_wexpand(b, top) == NULL) return 0; for (i = 0, j = idx; i < top * sizeof b->d[0]; i++, j += width) { ((unsigned char *)b->d)[i] = buf[j]; } b->top = top; if (!BN_is_odd(m)) { BNerr(BN_F_BN_MOD_EXP_MONT_CONSTTIME, BN_R_CALLED_WITH_EVEN_MODULUS); return (0); } top = m->top; bits = BN_num_bits(p); if (bits == 0) { /* x**0 mod 1 is still zero. */ if (BN_is_one(m)) { ret = 1; BN_zero(rr); } else { ret = BN_one(rr); } return ret; } BN_CTX_start(ctx); /* * Allocate a montgomery context if it was not supplied by the caller. If * this is not done, things will break in the montgomery part. */ if (in_mont != NULL) mont = in_mont; else { if ((mont = BN_MONT_CTX_new()) == NULL) goto err; if (!BN_MONT_CTX_set(mont, m, ctx)) goto err; } #ifdef RSAZ_ENABLED /* * If the size of the operands allow it, perform the optimized * RSAZ exponentiation. For further information see * crypto/bn/rsaz_exp.c and accompanying assembly modules. */ if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024) && rsaz_avx2_eligible()) { if (NULL == bn_wexpand(rr, 16)) goto err; RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d, mont->n0[0]); rr->top = 16; rr->neg = 0; bn_correct_top(rr); ret = 1; goto err; } else if ((8 == a->top) && (8 == p->top) && (BN_num_bits(m) == 512)) { if (NULL == bn_wexpand(rr, 8)) goto err; RSAZ_512_mod_exp(rr->d, a->d, p->d, m->d, mont->n0[0], mont->RR.d); rr->top = 8; rr->neg = 0; bn_correct_top(rr); ret = 1; goto err; } #endif /* Get the window size to use with size of p. */ window = BN_window_bits_for_ctime_exponent_size(bits); #if defined(SPARC_T4_MONT) if (window >= 5 && (top & 15) == 0 && top <= 64 && (OPENSSL_sparcv9cap_P[1] & (CFR_MONTMUL | CFR_MONTSQR)) == (CFR_MONTMUL | CFR_MONTSQR) && (t4 = OPENSSL_sparcv9cap_P[0])) window = 5; else #endif #if defined(OPENSSL_BN_ASM_MONT5) if (window >= 5) { window = 5; /* ~5% improvement for RSA2048 sign, and even * for RSA4096 */ if ((top & 7) == 0) powerbufLen += 2 * top * sizeof(m->d[0]); } #endif (void)0; /* * Allocate a buffer large enough to hold all of the pre-computed powers * of am, am itself and tmp. */ numPowers = 1 << window; powerbufLen += sizeof(m->d[0]) * (top * numPowers + ((2 * top) > numPowers ? 
(2 * top) : numPowers)); #ifdef alloca if (powerbufLen < 3072) powerbufFree = alloca(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH); else #endif if ((powerbufFree = (unsigned char *)OPENSSL_malloc(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH)) == NULL) goto err; powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree); memset(powerbuf, 0, powerbufLen); #ifdef alloca if (powerbufLen < 3072) powerbufFree = NULL; #endif /* lay down tmp and am right after powers table */ tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers); am.d = tmp.d + top; tmp.top = am.top = 0; tmp.dmax = am.dmax = top; tmp.neg = am.neg = 0; tmp.flags = am.flags = BN_FLG_STATIC_DATA; /* prepare a^0 in Montgomery domain */ #if 1 /* by Shay Gueron's suggestion */ if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) { /* 2^(top*BN_BITS2) - m */ tmp.d[0] = (0 - m->d[0]) & BN_MASK2; for (i = 1; i < top; i++) tmp.d[i] = (~m->d[i]) & BN_MASK2; tmp.top = top; } else #endif if (!BN_to_montgomery(&tmp, BN_value_one(), mont, ctx)) goto err; /* prepare a^1 in Montgomery domain */ if (a->neg || BN_ucmp(a, m) >= 0) { if (!BN_mod(&am, a, m, ctx)) goto err; if (!BN_to_montgomery(&am, &am, mont, ctx)) goto err; } else if (!BN_to_montgomery(&am, a, mont, ctx)) goto err; #if defined(SPARC_T4_MONT) if (t4) { typedef int (*bn_pwr5_mont_f) (BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); int bn_pwr5_mont_t4_8(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); int bn_pwr5_mont_t4_16(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); int bn_pwr5_mont_t4_24(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); int bn_pwr5_mont_t4_32(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); static const bn_pwr5_mont_f pwr5_funcs[4] = { bn_pwr5_mont_t4_8, bn_pwr5_mont_t4_16, bn_pwr5_mont_t4_24, bn_pwr5_mont_t4_32 }; bn_pwr5_mont_f pwr5_worker = pwr5_funcs[top / 16 - 1]; typedef int (*bn_mul_mont_f) (BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); static const bn_mul_mont_f mul_funcs[4] = { bn_mul_mont_t4_8, bn_mul_mont_t4_16, bn_mul_mont_t4_24, bn_mul_mont_t4_32 }; bn_mul_mont_f mul_worker = mul_funcs[top / 16 - 1]; void bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); void bn_mul_mont_t4(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); void bn_mul_mont_gather5_t4(BN_ULONG *rp, const BN_ULONG *ap, const void *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); void bn_flip_n_scatter5_t4(const BN_ULONG *inp, size_t num, void *table, size_t power); void bn_gather5_t4(BN_ULONG *out, size_t num, void *table, size_t power); void bn_flip_t4(BN_ULONG *dst, BN_ULONG *src, size_t num); BN_ULONG *np = mont->N.d, *n0 = mont->n0; int stride = 5 * (6 - (top / 16 - 1)); /* multiple of 5, but less * than 32 */ /* * 
BN_to_montgomery can contaminate words above .top [in * BN_DEBUG[_DEBUG] build]... */ for (i = am.top; i < top; i++) am.d[i] = 0; for (i = tmp.top; i < top; i++) tmp.d[i] = 0; bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 0); bn_flip_n_scatter5_t4(am.d, top, powerbuf, 1); if (!(*mul_worker) (tmp.d, am.d, am.d, np, n0) && !(*mul_worker) (tmp.d, am.d, am.d, np, n0)) bn_mul_mont_vis3(tmp.d, am.d, am.d, np, n0, top); bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 2); for (i = 3; i < 32; i++) { /* Calculate a^i = a^(i-1) * a */ if (!(*mul_worker) (tmp.d, tmp.d, am.d, np, n0) && !(*mul_worker) (tmp.d, tmp.d, am.d, np, n0)) bn_mul_mont_vis3(tmp.d, tmp.d, am.d, np, n0, top); bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, i); } /* switch to 64-bit domain */ np = alloca(top * sizeof(BN_ULONG)); top /= 2; bn_flip_t4(np, mont->N.d, top); bits--; for (wvalue = 0, i = bits % 5; i >= 0; i--, bits--) wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); bn_gather5_t4(tmp.d, top, powerbuf, wvalue); /* * Scan the exponent one window at a time starting from the most * significant bits. */ while (bits >= 0) { if (bits < stride) stride = bits + 1; bits -= stride; wvalue = bn_get_bits(p, bits + 1); if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride)) continue; /* retry once and fall back */ if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride)) continue; bits += stride - 5; wvalue >>= stride - 5; wvalue &= 31; bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_gather5_t4(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue); } bn_flip_t4(tmp.d, tmp.d, top); top *= 2; /* back to 32-bit domain */ tmp.top = top; bn_correct_top(&tmp); OPENSSL_cleanse(np, top * sizeof(BN_ULONG)); } else #endif #if defined(OPENSSL_BN_ASM_MONT5) if (window == 5 && top > 1) { /* * This optimization uses ideas from http://eprint.iacr.org/2011/239, * specifically optimization of cache-timing attack countermeasures * and pre-computation optimization. */ /* * Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as * 512-bit RSA is hardly relevant, we omit it to spare size... */ void bn_mul_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap, const void *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); void bn_scatter5(const BN_ULONG *inp, size_t num, void *table, size_t power); void bn_gather5(BN_ULONG *out, size_t num, void *table, size_t power); void bn_power5(BN_ULONG *rp, const BN_ULONG *ap, const void *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); int bn_get_bits5(const BN_ULONG *ap, int off); int bn_from_montgomery(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *not_used, const BN_ULONG *np, const BN_ULONG *n0, int num); BN_ULONG *np = mont->N.d, *n0 = mont->n0, *np2; /* * BN_to_montgomery can contaminate words above .top [in * BN_DEBUG[_DEBUG] build]... 
*/ for (i = am.top; i < top; i++) am.d[i] = 0; for (i = tmp.top; i < top; i++) tmp.d[i] = 0; if (top & 7) np2 = np; else for (np2 = am.d + top, i = 0; i < top; i++) np2[2 * i] = np[i]; bn_scatter5(tmp.d, top, powerbuf, 0); bn_scatter5(am.d, am.top, powerbuf, 1); bn_mul_mont(tmp.d, am.d, am.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, 2); # if 0 for (i = 3; i < 32; i++) { /* Calculate a^i = a^(i-1) * a */ bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1); bn_scatter5(tmp.d, top, powerbuf, i); } # else /* same as above, but uses squaring for 1/2 of operations */ for (i = 4; i < 32; i *= 2) { bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, i); } for (i = 3; i < 8; i += 2) { int j; bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1); bn_scatter5(tmp.d, top, powerbuf, i); for (j = 2 * i; j < 32; j *= 2) { bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, j); } } for (; i < 16; i += 2) { bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1); bn_scatter5(tmp.d, top, powerbuf, i); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, 2 * i); } for (; i < 32; i += 2) { bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1); bn_scatter5(tmp.d, top, powerbuf, i); } # endif bits--; for (wvalue = 0, i = bits % 5; i >= 0; i--, bits--) wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); bn_gather5(tmp.d, top, powerbuf, wvalue); /* * Scan the exponent one window at a time starting from the most * significant bits. */ if (top & 7) while (bits >= 0) { for (wvalue = 0, i = 0; i < 5; i++, bits--) wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue); } else { while (bits >= 0) { wvalue = bn_get_bits5(p->d, bits - 4); bits -= 5; bn_power5(tmp.d, tmp.d, powerbuf, np2, n0, top, wvalue); } } ret = bn_from_montgomery(tmp.d, tmp.d, NULL, np2, n0, top); tmp.top = top; bn_correct_top(&tmp); if (ret) { if (!BN_copy(rr, &tmp)) ret = 0; goto err; /* non-zero ret means it's not error */ } } else #endif { if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, numPowers)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, numPowers)) goto err; /* * If the window size is greater than 1, then calculate * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) (even * powers could instead be computed as (a^(i/2))^2 to use the slight * performance advantage of sqr over mul). 
*/ if (window > 1) { if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF (&tmp, top, powerbuf, 2, numPowers)) goto err; for (i = 3; i < numPowers; i++) { /* Calculate a^i = a^(i-1) * a */ if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF (&tmp, top, powerbuf, i, numPowers)) goto err; } } bits--; for (wvalue = 0, i = bits % window; i >= 0; i--, bits--) wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); if (!MOD_EXP_CTIME_COPY_FROM_PREBUF (&tmp, top, powerbuf, wvalue, numPowers)) goto err; /* * Scan the exponent one window at a time starting from the most } else #endif { if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, numPowers)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, numPowers)) goto err; /* wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); } /* * Fetch the appropriate pre-computed value from the pre-buf if (window > 1) { if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF (&tmp, top, powerbuf, 2, numPowers)) goto err; for (i = 3; i < numPowers; i++) { /* Calculate a^i = a^(i-1) * a */ if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF (&tmp, top, powerbuf, i, numPowers)) goto err; } } for (i = 1; i < top; i++) bits--; for (wvalue = 0, i = bits % window; i >= 0; i--, bits--) wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); if (!MOD_EXP_CTIME_COPY_FROM_PREBUF (&tmp, top, powerbuf, wvalue, numPowers)) goto err; /* err: if ((in_mont == NULL) && (mont != NULL)) BN_MONT_CTX_free(mont); if (powerbuf != NULL) { OPENSSL_cleanse(powerbuf, powerbufLen); if (powerbufFree) OPENSSL_free(powerbufFree); } BN_CTX_end(ctx); return (ret); } int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p, /* * Fetch the appropriate pre-computed value from the pre-buf */ if (!MOD_EXP_CTIME_COPY_FROM_PREBUF (&am, top, powerbuf, wvalue, numPowers)) goto err; /* Multiply the result into the intermediate result */ #define BN_MOD_MUL_WORD(r, w, m) \ (BN_mul_word(r, (w)) && \ (/* BN_ucmp(r, (m)) < 0 ? 1 :*/ \ (BN_mod(t, r, m, ctx) && (swap_tmp = r, r = t, t = swap_tmp, 1)))) /* * BN_MOD_MUL_WORD is only used with 'w' large, so the BN_ucmp test is * probably more overhead than always using BN_mod (which uses BN_copy if * a similar test returns true). */ /* * We can use BN_mod and do not need BN_nnmod because our accumulator is * never negative (the result of BN_mod does not depend on the sign of * the modulus). */ #define BN_TO_MONTGOMERY_WORD(r, w, mont) \ (BN_set_word(r, (w)) && BN_to_montgomery(r, r, (mont), ctx)) if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0) { /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */ BNerr(BN_F_BN_MOD_EXP_MONT_WORD, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return -1; } bn_check_top(p); bn_check_top(m); if (!BN_is_odd(m)) { BNerr(BN_F_BN_MOD_EXP_MONT_WORD, BN_R_CALLED_WITH_EVEN_MODULUS); return (0); } if (m->top == 1) a %= m->d[0]; /* make sure that 'a' is reduced */ bits = BN_num_bits(p); if (bits == 0) { /* x**0 mod 1 is still zero. 
*/ if (BN_is_one(m)) { ret = 1; BN_zero(rr); } else { ret = BN_one(rr); } return ret; } if (a == 0) { BN_zero(rr); ret = 1; return ret; } BN_CTX_start(ctx); d = BN_CTX_get(ctx); r = BN_CTX_get(ctx); t = BN_CTX_get(ctx); if (d == NULL || r == NULL || t == NULL) goto err; if (in_mont != NULL) mont = in_mont; else { if ((mont = BN_MONT_CTX_new()) == NULL) goto err; if (!BN_MONT_CTX_set(mont, m, ctx)) goto err; } r_is_one = 1; /* except for Montgomery factor */ /* bits-1 >= 0 */ /* The result is accumulated in the product r*w. */ w = a; /* bit 'bits-1' of 'p' is always set */ for (b = bits - 2; b >= 0; b--) { /* First, square r*w. */ next_w = w * w; if ((next_w / w) != w) { /* overflow */ if (r_is_one) { if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) goto err; r_is_one = 0; } else { if (!BN_MOD_MUL_WORD(r, w, m)) goto err; } next_w = 1; } w = next_w; if (!r_is_one) { if (!BN_mod_mul_montgomery(r, r, r, mont, ctx)) goto err; } /* Second, multiply r*w by 'a' if exponent bit is set. */ if (BN_is_bit_set(p, b)) { next_w = w * a; if ((next_w / a) != w) { /* overflow */ if (r_is_one) { if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) goto err; r_is_one = 0; } else { if (!BN_MOD_MUL_WORD(r, w, m)) goto err; } next_w = a; } w = next_w; } } /* Finally, set r:=r*w. */ if (w != 1) { if (r_is_one) { if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) goto err; r_is_one = 0; } else { if (!BN_MOD_MUL_WORD(r, w, m)) goto err; } } if (r_is_one) { /* can happen only if a == 1 */ if (!BN_one(rr)) goto err; } else { if (!BN_from_montgomery(rr, r, mont, ctx)) goto err; } ret = 1; err: if ((in_mont == NULL) && (mont != NULL)) BN_MONT_CTX_free(mont); BN_CTX_end(ctx); bn_check_top(rr); return (ret); } Commit Message: CWE ID: CWE-200
static int MOD_EXP_CTIME_COPY_TO_PREBUF(const BIGNUM *b, int top, unsigned char *buf, int idx, int window) { int i, j; int width = 1 << window; BN_ULONG *table = (BN_ULONG *)buf; if (top > b->top) top = b->top; /* this works because 'buf' is explicitly * zeroed */ for (i = 0, j = idx; i < top; i++, j += width) { table[j] = b->d[i]; } return 1; unsigned char *buf, int idx, static int MOD_EXP_CTIME_COPY_FROM_PREBUF(BIGNUM *b, int top, unsigned char *buf, int idx, int window) { int i, j; int width = 1 << window; volatile BN_ULONG *table = (volatile BN_ULONG *)buf; if (bn_wexpand(b, top) == NULL) return 0; if (window <= 3) { for (i = 0; i < top; i++, table += width) { BN_ULONG acc = 0; for (j = 0; j < width; j++) { acc |= table[j] & ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1)); } b->d[i] = acc; } } else { int xstride = 1 << (window - 2); BN_ULONG y0, y1, y2, y3; i = idx >> (window - 2); /* equivalent of idx / xstride */ idx &= xstride - 1; /* equivalent of idx % xstride */ y0 = (BN_ULONG)0 - (constant_time_eq_int(i,0)&1); y1 = (BN_ULONG)0 - (constant_time_eq_int(i,1)&1); y2 = (BN_ULONG)0 - (constant_time_eq_int(i,2)&1); y3 = (BN_ULONG)0 - (constant_time_eq_int(i,3)&1); for (i = 0; i < top; i++, table += width) { BN_ULONG acc = 0; for (j = 0; j < xstride; j++) { acc |= ( (table[j + 0 * xstride] & y0) | (table[j + 1 * xstride] & y1) | (table[j + 2 * xstride] & y2) | (table[j + 3 * xstride] & y3) ) & ((BN_ULONG)0 - (constant_time_eq_int(j,idx)&1)); } b->d[i] = acc; } } b->top = top; if (!BN_is_odd(m)) { BNerr(BN_F_BN_MOD_EXP_MONT_CONSTTIME, BN_R_CALLED_WITH_EVEN_MODULUS); return (0); } top = m->top; bits = BN_num_bits(p); if (bits == 0) { /* x**0 mod 1 is still zero. */ if (BN_is_one(m)) { ret = 1; BN_zero(rr); } else { ret = BN_one(rr); } return ret; } BN_CTX_start(ctx); /* * Allocate a montgomery context if it was not supplied by the caller. If * this is not done, things will break in the montgomery part. */ if (in_mont != NULL) mont = in_mont; else { if ((mont = BN_MONT_CTX_new()) == NULL) goto err; if (!BN_MONT_CTX_set(mont, m, ctx)) goto err; } #ifdef RSAZ_ENABLED /* * If the size of the operands allow it, perform the optimized * RSAZ exponentiation. For further information see * crypto/bn/rsaz_exp.c and accompanying assembly modules. */ if ((16 == a->top) && (16 == p->top) && (BN_num_bits(m) == 1024) && rsaz_avx2_eligible()) { if (NULL == bn_wexpand(rr, 16)) goto err; RSAZ_1024_mod_exp_avx2(rr->d, a->d, p->d, m->d, mont->RR.d, mont->n0[0]); rr->top = 16; rr->neg = 0; bn_correct_top(rr); ret = 1; goto err; } else if ((8 == a->top) && (8 == p->top) && (BN_num_bits(m) == 512)) { if (NULL == bn_wexpand(rr, 8)) goto err; RSAZ_512_mod_exp(rr->d, a->d, p->d, m->d, mont->n0[0], mont->RR.d); rr->top = 8; rr->neg = 0; bn_correct_top(rr); ret = 1; goto err; } #endif /* Get the window size to use with size of p. */ window = BN_window_bits_for_ctime_exponent_size(bits); #if defined(SPARC_T4_MONT) if (window >= 5 && (top & 15) == 0 && top <= 64 && (OPENSSL_sparcv9cap_P[1] & (CFR_MONTMUL | CFR_MONTSQR)) == (CFR_MONTMUL | CFR_MONTSQR) && (t4 = OPENSSL_sparcv9cap_P[0])) window = 5; else #endif #if defined(OPENSSL_BN_ASM_MONT5) if (window >= 5) { window = 5; /* ~5% improvement for RSA2048 sign, and even * for RSA4096 */ if ((top & 7) == 0) powerbufLen += 2 * top * sizeof(m->d[0]); } #endif (void)0; /* * Allocate a buffer large enough to hold all of the pre-computed powers * of am, am itself and tmp. */ numPowers = 1 << window; powerbufLen += sizeof(m->d[0]) * (top * numPowers + ((2 * top) > numPowers ? 
(2 * top) : numPowers)); #ifdef alloca if (powerbufLen < 3072) powerbufFree = alloca(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH); else #endif if ((powerbufFree = (unsigned char *)OPENSSL_malloc(powerbufLen + MOD_EXP_CTIME_MIN_CACHE_LINE_WIDTH)) == NULL) goto err; powerbuf = MOD_EXP_CTIME_ALIGN(powerbufFree); memset(powerbuf, 0, powerbufLen); #ifdef alloca if (powerbufLen < 3072) powerbufFree = NULL; #endif /* lay down tmp and am right after powers table */ tmp.d = (BN_ULONG *)(powerbuf + sizeof(m->d[0]) * top * numPowers); am.d = tmp.d + top; tmp.top = am.top = 0; tmp.dmax = am.dmax = top; tmp.neg = am.neg = 0; tmp.flags = am.flags = BN_FLG_STATIC_DATA; /* prepare a^0 in Montgomery domain */ #if 1 /* by Shay Gueron's suggestion */ if (m->d[top - 1] & (((BN_ULONG)1) << (BN_BITS2 - 1))) { /* 2^(top*BN_BITS2) - m */ tmp.d[0] = (0 - m->d[0]) & BN_MASK2; for (i = 1; i < top; i++) tmp.d[i] = (~m->d[i]) & BN_MASK2; tmp.top = top; } else #endif if (!BN_to_montgomery(&tmp, BN_value_one(), mont, ctx)) goto err; /* prepare a^1 in Montgomery domain */ if (a->neg || BN_ucmp(a, m) >= 0) { if (!BN_mod(&am, a, m, ctx)) goto err; if (!BN_to_montgomery(&am, &am, mont, ctx)) goto err; } else if (!BN_to_montgomery(&am, a, mont, ctx)) goto err; #if defined(SPARC_T4_MONT) if (t4) { typedef int (*bn_pwr5_mont_f) (BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); int bn_pwr5_mont_t4_8(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); int bn_pwr5_mont_t4_16(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); int bn_pwr5_mont_t4_24(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); int bn_pwr5_mont_t4_32(BN_ULONG *tp, const BN_ULONG *np, const BN_ULONG *n0, const void *table, int power, int bits); static const bn_pwr5_mont_f pwr5_funcs[4] = { bn_pwr5_mont_t4_8, bn_pwr5_mont_t4_16, bn_pwr5_mont_t4_24, bn_pwr5_mont_t4_32 }; bn_pwr5_mont_f pwr5_worker = pwr5_funcs[top / 16 - 1]; typedef int (*bn_mul_mont_f) (BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); int bn_mul_mont_t4_8(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); int bn_mul_mont_t4_16(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); int bn_mul_mont_t4_24(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); int bn_mul_mont_t4_32(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0); static const bn_mul_mont_f mul_funcs[4] = { bn_mul_mont_t4_8, bn_mul_mont_t4_16, bn_mul_mont_t4_24, bn_mul_mont_t4_32 }; bn_mul_mont_f mul_worker = mul_funcs[top / 16 - 1]; void bn_mul_mont_vis3(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); void bn_mul_mont_t4(BN_ULONG *rp, const BN_ULONG *ap, const void *bp, const BN_ULONG *np, const BN_ULONG *n0, int num); void bn_mul_mont_gather5_t4(BN_ULONG *rp, const BN_ULONG *ap, const void *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); void bn_flip_n_scatter5_t4(const BN_ULONG *inp, size_t num, void *table, size_t power); void bn_gather5_t4(BN_ULONG *out, size_t num, void *table, size_t power); void bn_flip_t4(BN_ULONG *dst, BN_ULONG *src, size_t num); BN_ULONG *np = mont->N.d, *n0 = mont->n0; int stride = 5 * (6 - (top / 16 - 1)); /* multiple of 5, but less * than 32 */ /* * 
BN_to_montgomery can contaminate words above .top [in * BN_DEBUG[_DEBUG] build]... */ for (i = am.top; i < top; i++) am.d[i] = 0; for (i = tmp.top; i < top; i++) tmp.d[i] = 0; bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 0); bn_flip_n_scatter5_t4(am.d, top, powerbuf, 1); if (!(*mul_worker) (tmp.d, am.d, am.d, np, n0) && !(*mul_worker) (tmp.d, am.d, am.d, np, n0)) bn_mul_mont_vis3(tmp.d, am.d, am.d, np, n0, top); bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, 2); for (i = 3; i < 32; i++) { /* Calculate a^i = a^(i-1) * a */ if (!(*mul_worker) (tmp.d, tmp.d, am.d, np, n0) && !(*mul_worker) (tmp.d, tmp.d, am.d, np, n0)) bn_mul_mont_vis3(tmp.d, tmp.d, am.d, np, n0, top); bn_flip_n_scatter5_t4(tmp.d, top, powerbuf, i); } /* switch to 64-bit domain */ np = alloca(top * sizeof(BN_ULONG)); top /= 2; bn_flip_t4(np, mont->N.d, top); bits--; for (wvalue = 0, i = bits % 5; i >= 0; i--, bits--) wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); bn_gather5_t4(tmp.d, top, powerbuf, wvalue); /* * Scan the exponent one window at a time starting from the most * significant bits. */ while (bits >= 0) { if (bits < stride) stride = bits + 1; bits -= stride; wvalue = bn_get_bits(p, bits + 1); if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride)) continue; /* retry once and fall back */ if ((*pwr5_worker) (tmp.d, np, n0, powerbuf, wvalue, stride)) continue; bits += stride - 5; wvalue >>= stride - 5; wvalue &= 31; bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_t4(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_gather5_t4(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue); } bn_flip_t4(tmp.d, tmp.d, top); top *= 2; /* back to 32-bit domain */ tmp.top = top; bn_correct_top(&tmp); OPENSSL_cleanse(np, top * sizeof(BN_ULONG)); } else #endif #if defined(OPENSSL_BN_ASM_MONT5) if (window == 5 && top > 1) { /* * This optimization uses ideas from http://eprint.iacr.org/2011/239, * specifically optimization of cache-timing attack countermeasures * and pre-computation optimization. */ /* * Dedicated window==4 case improves 512-bit RSA sign by ~15%, but as * 512-bit RSA is hardly relevant, we omit it to spare size... */ void bn_mul_mont_gather5(BN_ULONG *rp, const BN_ULONG *ap, const void *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); void bn_scatter5(const BN_ULONG *inp, size_t num, void *table, size_t power); void bn_gather5(BN_ULONG *out, size_t num, void *table, size_t power); void bn_power5(BN_ULONG *rp, const BN_ULONG *ap, const void *table, const BN_ULONG *np, const BN_ULONG *n0, int num, int power); int bn_get_bits5(const BN_ULONG *ap, int off); int bn_from_montgomery(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *not_used, const BN_ULONG *np, const BN_ULONG *n0, int num); BN_ULONG *np = mont->N.d, *n0 = mont->n0, *np2; /* * BN_to_montgomery can contaminate words above .top [in * BN_DEBUG[_DEBUG] build]... 
*/ for (i = am.top; i < top; i++) am.d[i] = 0; for (i = tmp.top; i < top; i++) tmp.d[i] = 0; if (top & 7) np2 = np; else for (np2 = am.d + top, i = 0; i < top; i++) np2[2 * i] = np[i]; bn_scatter5(tmp.d, top, powerbuf, 0); bn_scatter5(am.d, am.top, powerbuf, 1); bn_mul_mont(tmp.d, am.d, am.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, 2); # if 0 for (i = 3; i < 32; i++) { /* Calculate a^i = a^(i-1) * a */ bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1); bn_scatter5(tmp.d, top, powerbuf, i); } # else /* same as above, but uses squaring for 1/2 of operations */ for (i = 4; i < 32; i *= 2) { bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, i); } for (i = 3; i < 8; i += 2) { int j; bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1); bn_scatter5(tmp.d, top, powerbuf, i); for (j = 2 * i; j < 32; j *= 2) { bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, j); } } for (; i < 16; i += 2) { bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1); bn_scatter5(tmp.d, top, powerbuf, i); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_scatter5(tmp.d, top, powerbuf, 2 * i); } for (; i < 32; i += 2) { bn_mul_mont_gather5(tmp.d, am.d, powerbuf, np2, n0, top, i - 1); bn_scatter5(tmp.d, top, powerbuf, i); } # endif bits--; for (wvalue = 0, i = bits % 5; i >= 0; i--, bits--) wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); bn_gather5(tmp.d, top, powerbuf, wvalue); /* * Scan the exponent one window at a time starting from the most * significant bits. */ if (top & 7) while (bits >= 0) { for (wvalue = 0, i = 0; i < 5; i++, bits--) wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont(tmp.d, tmp.d, tmp.d, np, n0, top); bn_mul_mont_gather5(tmp.d, tmp.d, powerbuf, np, n0, top, wvalue); } else { while (bits >= 0) { wvalue = bn_get_bits5(p->d, bits - 4); bits -= 5; bn_power5(tmp.d, tmp.d, powerbuf, np2, n0, top, wvalue); } } ret = bn_from_montgomery(tmp.d, tmp.d, NULL, np2, n0, top); tmp.top = top; bn_correct_top(&tmp); if (ret) { if (!BN_copy(rr, &tmp)) ret = 0; goto err; /* non-zero ret means it's not error */ } } else #endif { if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, numPowers)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, numPowers)) goto err; /* * If the window size is greater than 1, then calculate * val[i=2..2^winsize-1]. Powers are computed as a*a^(i-1) (even * powers could instead be computed as (a^(i/2))^2 to use the slight * performance advantage of sqr over mul). 
*/ if (window > 1) { if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF (&tmp, top, powerbuf, 2, numPowers)) goto err; for (i = 3; i < numPowers; i++) { /* Calculate a^i = a^(i-1) * a */ if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF (&tmp, top, powerbuf, i, numPowers)) goto err; } } bits--; for (wvalue = 0, i = bits % window; i >= 0; i--, bits--) wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); if (!MOD_EXP_CTIME_COPY_FROM_PREBUF (&tmp, top, powerbuf, wvalue, numPowers)) goto err; /* * Scan the exponent one window at a time starting from the most } else #endif { if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 0, window)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&am, top, powerbuf, 1, window)) goto err; /* wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); } /* * Fetch the appropriate pre-computed value from the pre-buf if (window > 1) { if (!BN_mod_mul_montgomery(&tmp, &am, &am, mont, ctx)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, 2, window)) goto err; for (i = 3; i < numPowers; i++) { /* Calculate a^i = a^(i-1) * a */ if (!BN_mod_mul_montgomery(&tmp, &am, &tmp, mont, ctx)) goto err; if (!MOD_EXP_CTIME_COPY_TO_PREBUF(&tmp, top, powerbuf, i, window)) goto err; } } for (i = 1; i < top; i++) bits--; for (wvalue = 0, i = bits % window; i >= 0; i--, bits--) wvalue = (wvalue << 1) + BN_is_bit_set(p, bits); if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&tmp, top, powerbuf, wvalue, window)) goto err; /* err: if ((in_mont == NULL) && (mont != NULL)) BN_MONT_CTX_free(mont); if (powerbuf != NULL) { OPENSSL_cleanse(powerbuf, powerbufLen); if (powerbufFree) OPENSSL_free(powerbufFree); } BN_CTX_end(ctx); return (ret); } int BN_mod_exp_mont_word(BIGNUM *rr, BN_ULONG a, const BIGNUM *p, /* * Fetch the appropriate pre-computed value from the pre-buf */ if (!MOD_EXP_CTIME_COPY_FROM_PREBUF(&am, top, powerbuf, wvalue, window)) goto err; /* Multiply the result into the intermediate result */ #define BN_MOD_MUL_WORD(r, w, m) \ (BN_mul_word(r, (w)) && \ (/* BN_ucmp(r, (m)) < 0 ? 1 :*/ \ (BN_mod(t, r, m, ctx) && (swap_tmp = r, r = t, t = swap_tmp, 1)))) /* * BN_MOD_MUL_WORD is only used with 'w' large, so the BN_ucmp test is * probably more overhead than always using BN_mod (which uses BN_copy if * a similar test returns true). */ /* * We can use BN_mod and do not need BN_nnmod because our accumulator is * never negative (the result of BN_mod does not depend on the sign of * the modulus). */ #define BN_TO_MONTGOMERY_WORD(r, w, mont) \ (BN_set_word(r, (w)) && BN_to_montgomery(r, r, (mont), ctx)) if (BN_get_flags(p, BN_FLG_CONSTTIME) != 0) { /* BN_FLG_CONSTTIME only supported by BN_mod_exp_mont() */ BNerr(BN_F_BN_MOD_EXP_MONT_WORD, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED); return -1; } bn_check_top(p); bn_check_top(m); if (!BN_is_odd(m)) { BNerr(BN_F_BN_MOD_EXP_MONT_WORD, BN_R_CALLED_WITH_EVEN_MODULUS); return (0); } if (m->top == 1) a %= m->d[0]; /* make sure that 'a' is reduced */ bits = BN_num_bits(p); if (bits == 0) { /* x**0 mod 1 is still zero. 
*/ if (BN_is_one(m)) { ret = 1; BN_zero(rr); } else { ret = BN_one(rr); } return ret; } if (a == 0) { BN_zero(rr); ret = 1; return ret; } BN_CTX_start(ctx); d = BN_CTX_get(ctx); r = BN_CTX_get(ctx); t = BN_CTX_get(ctx); if (d == NULL || r == NULL || t == NULL) goto err; if (in_mont != NULL) mont = in_mont; else { if ((mont = BN_MONT_CTX_new()) == NULL) goto err; if (!BN_MONT_CTX_set(mont, m, ctx)) goto err; } r_is_one = 1; /* except for Montgomery factor */ /* bits-1 >= 0 */ /* The result is accumulated in the product r*w. */ w = a; /* bit 'bits-1' of 'p' is always set */ for (b = bits - 2; b >= 0; b--) { /* First, square r*w. */ next_w = w * w; if ((next_w / w) != w) { /* overflow */ if (r_is_one) { if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) goto err; r_is_one = 0; } else { if (!BN_MOD_MUL_WORD(r, w, m)) goto err; } next_w = 1; } w = next_w; if (!r_is_one) { if (!BN_mod_mul_montgomery(r, r, r, mont, ctx)) goto err; } /* Second, multiply r*w by 'a' if exponent bit is set. */ if (BN_is_bit_set(p, b)) { next_w = w * a; if ((next_w / a) != w) { /* overflow */ if (r_is_one) { if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) goto err; r_is_one = 0; } else { if (!BN_MOD_MUL_WORD(r, w, m)) goto err; } next_w = a; } w = next_w; } } /* Finally, set r:=r*w. */ if (w != 1) { if (r_is_one) { if (!BN_TO_MONTGOMERY_WORD(r, w, mont)) goto err; r_is_one = 0; } else { if (!BN_MOD_MUL_WORD(r, w, m)) goto err; } } if (r_is_one) { /* can happen only if a == 1 */ if (!BN_one(rr)) goto err; } else { if (!BN_from_montgomery(rr, r, mont, ctx)) goto err; } ret = 1; err: if ((in_mont == NULL) && (mont != NULL)) BN_MONT_CTX_free(mont); BN_CTX_end(ctx); bn_check_top(rr); return (ret); }
165,254
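The fixed MOD_EXP_CTIME_COPY_FROM_PREBUF above avoids the CWE-200 cache-timing leak by touching every table entry and masking out all but the wanted one, so the memory access pattern no longer depends on the secret index. A self-contained sketch of that constant-time selection idea follows; constant_time_eq here is a local helper standing in for OpenSSL's constant_time_eq_int, and the 64-bit word type is an assumption.

#include <stdint.h>
#include <stdio.h>

/* All-ones if a == b, all-zeros otherwise, with no data-dependent branch. */
static uint64_t constant_time_eq(uint64_t a, uint64_t b)
{
    uint64_t d = a ^ b;
    d = ~d & (d - 1);             /* top bit ends up set iff d was zero */
    return (uint64_t)0 - (d >> 63);
}

/* Read table[idx] while touching every entry, so the cache footprint does
 * not reveal idx. */
static uint64_t ct_lookup(const uint64_t *table, size_t n, size_t idx)
{
    uint64_t acc = 0;
    for (size_t i = 0; i < n; i++)
        acc |= table[i] & constant_time_eq(i, idx);
    return acc;
}

int main(void)
{
    uint64_t t[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
    printf("%llu\n", (unsigned long long)ct_lookup(t, 8, 5));   /* prints 15 */
    return 0;
}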
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: bool ChromeContentClient::SandboxPlugin(CommandLine* command_line, sandbox::TargetPolicy* policy) { std::wstring plugin_dll = command_line-> GetSwitchValueNative(switches::kPluginPath); FilePath builtin_flash; if (!PathService::Get(chrome::FILE_FLASH_PLUGIN, &builtin_flash)) return false; FilePath plugin_path(plugin_dll); if (plugin_path.BaseName() != builtin_flash.BaseName()) return false; if (base::win::GetVersion() <= base::win::VERSION_XP || CommandLine::ForCurrentProcess()->HasSwitch( switches::kDisableFlashSandbox)) { return false; } if (policy->AddRule(sandbox::TargetPolicy::SUBSYS_NAMED_PIPES, sandbox::TargetPolicy::NAMEDPIPES_ALLOW_ANY, L"\\\\.\\pipe\\chrome.*") != sandbox::SBOX_ALL_OK) { NOTREACHED(); return false; } if (LoadFlashBroker(plugin_path, command_line)) { policy->SetJobLevel(sandbox::JOB_UNPROTECTED, 0); policy->SetTokenLevel(sandbox::USER_RESTRICTED_SAME_ACCESS, sandbox::USER_INTERACTIVE); if (base::win::GetVersion() == base::win::VERSION_VISTA) { ::ChangeWindowMessageFilter(WM_MOUSEWHEEL, MSGFLT_ADD); ::ChangeWindowMessageFilter(WM_APPCOMMAND, MSGFLT_ADD); } policy->SetIntegrityLevel(sandbox::INTEGRITY_LEVEL_LOW); } else { DLOG(WARNING) << "Failed to start flash broker"; policy->SetJobLevel(sandbox::JOB_UNPROTECTED, 0); policy->SetTokenLevel( sandbox::USER_UNPROTECTED, sandbox::USER_UNPROTECTED); } return true; } Commit Message: Convert plugin and GPU process to brokered handle duplication. BUG=119250 Review URL: https://chromiumcodereview.appspot.com/9958034 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID:
bool ChromeContentClient::SandboxPlugin(CommandLine* command_line, sandbox::TargetPolicy* policy) { std::wstring plugin_dll = command_line-> GetSwitchValueNative(switches::kPluginPath); FilePath builtin_flash; if (!PathService::Get(chrome::FILE_FLASH_PLUGIN, &builtin_flash)) return false; FilePath plugin_path(plugin_dll); if (plugin_path.BaseName() != builtin_flash.BaseName()) return false; if (base::win::GetVersion() <= base::win::VERSION_XP || CommandLine::ForCurrentProcess()->HasSwitch( switches::kDisableFlashSandbox)) { return false; } // Add policy for the plugin proxy window pump event // used by WebPluginDelegateProxy::HandleInputEvent(). if (policy->AddRule(sandbox::TargetPolicy::SUBSYS_HANDLES, sandbox::TargetPolicy::HANDLES_DUP_ANY, L"Event") != sandbox::SBOX_ALL_OK) { NOTREACHED(); return false; } if (policy->AddRule(sandbox::TargetPolicy::SUBSYS_NAMED_PIPES, sandbox::TargetPolicy::NAMEDPIPES_ALLOW_ANY, L"\\\\.\\pipe\\chrome.*") != sandbox::SBOX_ALL_OK) { NOTREACHED(); return false; } if (LoadFlashBroker(plugin_path, command_line)) { policy->SetJobLevel(sandbox::JOB_UNPROTECTED, 0); policy->SetTokenLevel(sandbox::USER_RESTRICTED_SAME_ACCESS, sandbox::USER_INTERACTIVE); if (base::win::GetVersion() == base::win::VERSION_VISTA) { ::ChangeWindowMessageFilter(WM_MOUSEWHEEL, MSGFLT_ADD); ::ChangeWindowMessageFilter(WM_APPCOMMAND, MSGFLT_ADD); } policy->SetIntegrityLevel(sandbox::INTEGRITY_LEVEL_LOW); } else { DLOG(WARNING) << "Failed to start flash broker"; policy->SetJobLevel(sandbox::JOB_UNPROTECTED, 0); policy->SetTokenLevel( sandbox::USER_UNPROTECTED, sandbox::USER_UNPROTECTED); } return true; }
170,916
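The SandboxPlugin fix above adds an explicit HANDLES_DUP_ANY rule for "Event" objects, trading blanket privileges for a narrowly scoped, brokered grant. The sketch below is a toy allow-list broker in plain C meant only to illustrate that default-deny-plus-explicit-rule shape; it uses none of the actual Windows or Chromium sandbox APIs.

#include <stdio.h>
#include <string.h>

/* Object types the toy broker will duplicate for a sandboxed child;
 * everything else is denied by default. */
static const char *allowed_dup_types[] = { "Event" };

static int broker_allow_duplicate(const char *object_type)
{
    size_t n = sizeof(allowed_dup_types) / sizeof(allowed_dup_types[0]);
    for (size_t i = 0; i < n; i++) {
        if (strcmp(object_type, allowed_dup_types[i]) == 0)
            return 1;             /* explicitly granted by a rule */
    }
    return 0;                     /* default deny */
}

int main(void)
{
    printf("Event:   %s\n", broker_allow_duplicate("Event")   ? "allow" : "deny");
    printf("Process: %s\n", broker_allow_duplicate("Process") ? "allow" : "deny");
    return 0;
}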
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: _zip_dirent_read(zip_dirent_t *zde, zip_source_t *src, zip_buffer_t *buffer, bool local, zip_error_t *error) { zip_uint8_t buf[CDENTRYSIZE]; zip_uint16_t dostime, dosdate; zip_uint32_t size, variable_size; zip_uint16_t filename_len, comment_len, ef_len; bool from_buffer = (buffer != NULL); size = local ? LENTRYSIZE : CDENTRYSIZE; if (buffer) { if (_zip_buffer_left(buffer) < size) { zip_error_set(error, ZIP_ER_NOZIP, 0); return -1; } } else { if ((buffer = _zip_buffer_new_from_source(src, size, buf, error)) == NULL) { return -1; } } if (memcmp(_zip_buffer_get(buffer, 4), (local ? LOCAL_MAGIC : CENTRAL_MAGIC), 4) != 0) { zip_error_set(error, ZIP_ER_NOZIP, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } /* convert buffercontents to zip_dirent */ _zip_dirent_init(zde); if (!local) zde->version_madeby = _zip_buffer_get_16(buffer); else zde->version_madeby = 0; zde->version_needed = _zip_buffer_get_16(buffer); zde->bitflags = _zip_buffer_get_16(buffer); zde->comp_method = _zip_buffer_get_16(buffer); /* convert to time_t */ dostime = _zip_buffer_get_16(buffer); dosdate = _zip_buffer_get_16(buffer); zde->last_mod = _zip_d2u_time(dostime, dosdate); zde->crc = _zip_buffer_get_32(buffer); zde->comp_size = _zip_buffer_get_32(buffer); zde->uncomp_size = _zip_buffer_get_32(buffer); filename_len = _zip_buffer_get_16(buffer); ef_len = _zip_buffer_get_16(buffer); if (local) { comment_len = 0; zde->disk_number = 0; zde->int_attrib = 0; zde->ext_attrib = 0; zde->offset = 0; } else { comment_len = _zip_buffer_get_16(buffer); zde->disk_number = _zip_buffer_get_16(buffer); zde->int_attrib = _zip_buffer_get_16(buffer); zde->ext_attrib = _zip_buffer_get_32(buffer); zde->offset = _zip_buffer_get_32(buffer); } if (!_zip_buffer_ok(buffer)) { zip_error_set(error, ZIP_ER_INTERNAL, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (zde->bitflags & ZIP_GPBF_ENCRYPTED) { if (zde->bitflags & ZIP_GPBF_STRONG_ENCRYPTION) { /* TODO */ zde->encryption_method = ZIP_EM_UNKNOWN; } else { zde->encryption_method = ZIP_EM_TRAD_PKWARE; } } else { zde->encryption_method = ZIP_EM_NONE; } zde->filename = NULL; zde->extra_fields = NULL; zde->comment = NULL; variable_size = (zip_uint32_t)filename_len+(zip_uint32_t)ef_len+(zip_uint32_t)comment_len; if (from_buffer) { if (_zip_buffer_left(buffer) < variable_size) { zip_error_set(error, ZIP_ER_INCONS, 0); return -1; } } else { _zip_buffer_free(buffer); if ((buffer = _zip_buffer_new_from_source(src, variable_size, NULL, error)) == NULL) { return -1; } } if (filename_len) { zde->filename = _zip_read_string(buffer, src, filename_len, 1, error); if (!zde->filename) { if (zip_error_code_zip(error) == ZIP_ER_EOF) { zip_error_set(error, ZIP_ER_INCONS, 0); } if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (zde->bitflags & ZIP_GPBF_ENCODING_UTF_8) { if (_zip_guess_encoding(zde->filename, ZIP_ENCODING_UTF8_KNOWN) == ZIP_ENCODING_ERROR) { zip_error_set(error, ZIP_ER_INCONS, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } } } if (ef_len) { zip_uint8_t *ef = _zip_read_data(buffer, src, ef_len, 0, error); if (ef == NULL) { if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (!_zip_ef_parse(ef, ef_len, local ? 
ZIP_EF_LOCAL : ZIP_EF_CENTRAL, &zde->extra_fields, error)) { free(ef); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } free(ef); if (local) zde->local_extra_fields_read = 1; } if (comment_len) { zde->comment = _zip_read_string(buffer, src, comment_len, 0, error); if (!zde->comment) { if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (zde->bitflags & ZIP_GPBF_ENCODING_UTF_8) { if (_zip_guess_encoding(zde->comment, ZIP_ENCODING_UTF8_KNOWN) == ZIP_ENCODING_ERROR) { zip_error_set(error, ZIP_ER_INCONS, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } } } zde->filename = _zip_dirent_process_ef_utf_8(zde, ZIP_EF_UTF_8_NAME, zde->filename); zde->comment = _zip_dirent_process_ef_utf_8(zde, ZIP_EF_UTF_8_COMMENT, zde->comment); /* Zip64 */ if (zde->uncomp_size == ZIP_UINT32_MAX || zde->comp_size == ZIP_UINT32_MAX || zde->offset == ZIP_UINT32_MAX) { zip_uint16_t got_len; zip_buffer_t *ef_buffer; const zip_uint8_t *ef = _zip_ef_get_by_id(zde->extra_fields, &got_len, ZIP_EF_ZIP64, 0, local ? ZIP_EF_LOCAL : ZIP_EF_CENTRAL, error); /* TODO: if got_len == 0 && !ZIP64_EOCD: no error, 0xffffffff is valid value */ if (ef == NULL) { if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if ((ef_buffer = _zip_buffer_new((zip_uint8_t *)ef, got_len)) == NULL) { zip_error_set(error, ZIP_ER_MEMORY, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (zde->uncomp_size == ZIP_UINT32_MAX) zde->uncomp_size = _zip_buffer_get_64(ef_buffer); else if (local) { /* From appnote.txt: This entry in the Local header MUST include BOTH original and compressed file size fields. */ (void)_zip_buffer_skip(ef_buffer, 8); /* error is caught by _zip_buffer_eof() call */ } if (zde->comp_size == ZIP_UINT32_MAX) zde->comp_size = _zip_buffer_get_64(ef_buffer); if (!local) { if (zde->offset == ZIP_UINT32_MAX) zde->offset = _zip_buffer_get_64(ef_buffer); if (zde->disk_number == ZIP_UINT16_MAX) zde->disk_number = _zip_buffer_get_32(buffer); } if (!_zip_buffer_eof(ef_buffer)) { zip_error_set(error, ZIP_ER_INCONS, 0); _zip_buffer_free(ef_buffer); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } _zip_buffer_free(ef_buffer); } if (!_zip_buffer_ok(buffer)) { zip_error_set(error, ZIP_ER_INTERNAL, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (!from_buffer) { _zip_buffer_free(buffer); } /* zip_source_seek / zip_source_tell don't support values > ZIP_INT64_MAX */ if (zde->offset > ZIP_INT64_MAX) { zip_error_set(error, ZIP_ER_SEEK, EFBIG); return -1; } if (!_zip_dirent_process_winzip_aes(zde, error)) { if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } zde->extra_fields = _zip_ef_remove_internal(zde->extra_fields); return (zip_int64_t)(size + variable_size); } Commit Message: Fix double free(). Found by Brian 'geeknik' Carpenter using AFL. CWE ID: CWE-415
_zip_dirent_read(zip_dirent_t *zde, zip_source_t *src, zip_buffer_t *buffer, bool local, zip_error_t *error) { zip_uint8_t buf[CDENTRYSIZE]; zip_uint16_t dostime, dosdate; zip_uint32_t size, variable_size; zip_uint16_t filename_len, comment_len, ef_len; bool from_buffer = (buffer != NULL); size = local ? LENTRYSIZE : CDENTRYSIZE; if (buffer) { if (_zip_buffer_left(buffer) < size) { zip_error_set(error, ZIP_ER_NOZIP, 0); return -1; } } else { if ((buffer = _zip_buffer_new_from_source(src, size, buf, error)) == NULL) { return -1; } } if (memcmp(_zip_buffer_get(buffer, 4), (local ? LOCAL_MAGIC : CENTRAL_MAGIC), 4) != 0) { zip_error_set(error, ZIP_ER_NOZIP, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } /* convert buffercontents to zip_dirent */ _zip_dirent_init(zde); if (!local) zde->version_madeby = _zip_buffer_get_16(buffer); else zde->version_madeby = 0; zde->version_needed = _zip_buffer_get_16(buffer); zde->bitflags = _zip_buffer_get_16(buffer); zde->comp_method = _zip_buffer_get_16(buffer); /* convert to time_t */ dostime = _zip_buffer_get_16(buffer); dosdate = _zip_buffer_get_16(buffer); zde->last_mod = _zip_d2u_time(dostime, dosdate); zde->crc = _zip_buffer_get_32(buffer); zde->comp_size = _zip_buffer_get_32(buffer); zde->uncomp_size = _zip_buffer_get_32(buffer); filename_len = _zip_buffer_get_16(buffer); ef_len = _zip_buffer_get_16(buffer); if (local) { comment_len = 0; zde->disk_number = 0; zde->int_attrib = 0; zde->ext_attrib = 0; zde->offset = 0; } else { comment_len = _zip_buffer_get_16(buffer); zde->disk_number = _zip_buffer_get_16(buffer); zde->int_attrib = _zip_buffer_get_16(buffer); zde->ext_attrib = _zip_buffer_get_32(buffer); zde->offset = _zip_buffer_get_32(buffer); } if (!_zip_buffer_ok(buffer)) { zip_error_set(error, ZIP_ER_INTERNAL, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (zde->bitflags & ZIP_GPBF_ENCRYPTED) { if (zde->bitflags & ZIP_GPBF_STRONG_ENCRYPTION) { /* TODO */ zde->encryption_method = ZIP_EM_UNKNOWN; } else { zde->encryption_method = ZIP_EM_TRAD_PKWARE; } } else { zde->encryption_method = ZIP_EM_NONE; } zde->filename = NULL; zde->extra_fields = NULL; zde->comment = NULL; variable_size = (zip_uint32_t)filename_len+(zip_uint32_t)ef_len+(zip_uint32_t)comment_len; if (from_buffer) { if (_zip_buffer_left(buffer) < variable_size) { zip_error_set(error, ZIP_ER_INCONS, 0); return -1; } } else { _zip_buffer_free(buffer); if ((buffer = _zip_buffer_new_from_source(src, variable_size, NULL, error)) == NULL) { return -1; } } if (filename_len) { zde->filename = _zip_read_string(buffer, src, filename_len, 1, error); if (!zde->filename) { if (zip_error_code_zip(error) == ZIP_ER_EOF) { zip_error_set(error, ZIP_ER_INCONS, 0); } if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (zde->bitflags & ZIP_GPBF_ENCODING_UTF_8) { if (_zip_guess_encoding(zde->filename, ZIP_ENCODING_UTF8_KNOWN) == ZIP_ENCODING_ERROR) { zip_error_set(error, ZIP_ER_INCONS, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } } } if (ef_len) { zip_uint8_t *ef = _zip_read_data(buffer, src, ef_len, 0, error); if (ef == NULL) { if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (!_zip_ef_parse(ef, ef_len, local ? 
ZIP_EF_LOCAL : ZIP_EF_CENTRAL, &zde->extra_fields, error)) { free(ef); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } free(ef); if (local) zde->local_extra_fields_read = 1; } if (comment_len) { zde->comment = _zip_read_string(buffer, src, comment_len, 0, error); if (!zde->comment) { if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (zde->bitflags & ZIP_GPBF_ENCODING_UTF_8) { if (_zip_guess_encoding(zde->comment, ZIP_ENCODING_UTF8_KNOWN) == ZIP_ENCODING_ERROR) { zip_error_set(error, ZIP_ER_INCONS, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } } } zde->filename = _zip_dirent_process_ef_utf_8(zde, ZIP_EF_UTF_8_NAME, zde->filename); zde->comment = _zip_dirent_process_ef_utf_8(zde, ZIP_EF_UTF_8_COMMENT, zde->comment); /* Zip64 */ if (zde->uncomp_size == ZIP_UINT32_MAX || zde->comp_size == ZIP_UINT32_MAX || zde->offset == ZIP_UINT32_MAX) { zip_uint16_t got_len; zip_buffer_t *ef_buffer; const zip_uint8_t *ef = _zip_ef_get_by_id(zde->extra_fields, &got_len, ZIP_EF_ZIP64, 0, local ? ZIP_EF_LOCAL : ZIP_EF_CENTRAL, error); /* TODO: if got_len == 0 && !ZIP64_EOCD: no error, 0xffffffff is valid value */ if (ef == NULL) { if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if ((ef_buffer = _zip_buffer_new((zip_uint8_t *)ef, got_len)) == NULL) { zip_error_set(error, ZIP_ER_MEMORY, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (zde->uncomp_size == ZIP_UINT32_MAX) zde->uncomp_size = _zip_buffer_get_64(ef_buffer); else if (local) { /* From appnote.txt: This entry in the Local header MUST include BOTH original and compressed file size fields. */ (void)_zip_buffer_skip(ef_buffer, 8); /* error is caught by _zip_buffer_eof() call */ } if (zde->comp_size == ZIP_UINT32_MAX) zde->comp_size = _zip_buffer_get_64(ef_buffer); if (!local) { if (zde->offset == ZIP_UINT32_MAX) zde->offset = _zip_buffer_get_64(ef_buffer); if (zde->disk_number == ZIP_UINT16_MAX) zde->disk_number = _zip_buffer_get_32(buffer); } if (!_zip_buffer_eof(ef_buffer)) { zip_error_set(error, ZIP_ER_INCONS, 0); _zip_buffer_free(ef_buffer); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } _zip_buffer_free(ef_buffer); } if (!_zip_buffer_ok(buffer)) { zip_error_set(error, ZIP_ER_INTERNAL, 0); if (!from_buffer) { _zip_buffer_free(buffer); } return -1; } if (!from_buffer) { _zip_buffer_free(buffer); } /* zip_source_seek / zip_source_tell don't support values > ZIP_INT64_MAX */ if (zde->offset > ZIP_INT64_MAX) { zip_error_set(error, ZIP_ER_SEEK, EFBIG); return -1; } if (!_zip_dirent_process_winzip_aes(zde, error)) { return -1; } zde->extra_fields = _zip_ef_remove_internal(zde->extra_fields); return (zip_int64_t)(size + variable_size); }
167,965
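The libzip fix above removes a second _zip_buffer_free() on a buffer that an earlier branch of the same error path had already released — the classic CWE-415 double free. A small generic C sketch of the hazard and the usual guard (free once, then clear the owning pointer) follows; parse and free_and_clear are illustrative, not libzip functions.

#include <stdlib.h>
#include <string.h>

/* Freeing through a helper that also clears the owning pointer makes an
 * accidental second free harmless, because free(NULL) is a no-op. */
static void free_and_clear(char **p)
{
    free(*p);
    *p = NULL;
}

static int parse(const char *input)
{
    int ret = -1;
    char *buffer = malloc(64);
    if (!buffer)
        return -1;

    if (input[0] == '\0') {
        free_and_clear(&buffer);  /* early release on this error path... */
        goto out;                 /* ...then fall into the shared cleanup */
    }

    strncpy(buffer, input, 63);
    buffer[63] = '\0';
    ret = 0;

out:
    free_and_clear(&buffer);      /* safe even after the early release; a plain
                                     free(buffer) here would be the double free */
    return ret;
}

int main(void)
{
    return parse("hello");
}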
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: SpeechSynthesisLibrary* CrosLibrary::GetSpeechSynthesisLibrary() { return speech_synthesis_lib_.GetDefaultImpl(use_stub_impl_); } Commit Message: chromeos: Replace copy-and-pasted code with macros. This replaces a bunch of duplicated-per-library cros function definitions and comments. BUG=none TEST=built it Review URL: http://codereview.chromium.org/6086007 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@70070 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-189
SpeechSynthesisLibrary* CrosLibrary::GetSpeechSynthesisLibrary() {
170,630
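The commit message in the record above describes replacing copy-and-pasted per-library getters with a macro. A generic C sketch of that style of refactor is below; the library names, types, and DEFINE_GETTER macro are invented for illustration and are not the chromeos CrosLibrary code.

#include <stdio.h>

/* Two hypothetical subsystems that need identical accessor boilerplate. */
typedef struct { const char *name; } speech_lib_t;
typedef struct { const char *name; } network_lib_t;

static speech_lib_t  g_speech  = { "speech"  };
static network_lib_t g_network = { "network" };

/* One macro stamps out each getter instead of copy-pasting the body. */
#define DEFINE_GETTER(type, fn, instance) \
    static type *fn(void) { return &(instance); }

DEFINE_GETTER(speech_lib_t,  get_speech_lib,  g_speech)
DEFINE_GETTER(network_lib_t, get_network_lib, g_network)

int main(void)
{
    printf("%s %s\n", get_speech_lib()->name, get_network_lib()->name);
    return 0;
}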
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static int iwgif_read_image(struct iwgifrcontext *rctx) { int retval=0; struct lzwdeccontext d; size_t subblocksize; int has_local_ct; int local_ct_size; unsigned int root_codesize; if(!iwgif_read(rctx,rctx->rbuf,9)) goto done; rctx->image_left = (int)iw_get_ui16le(&rctx->rbuf[0]); rctx->image_top = (int)iw_get_ui16le(&rctx->rbuf[2]); rctx->image_width = (int)iw_get_ui16le(&rctx->rbuf[4]); rctx->image_height = (int)iw_get_ui16le(&rctx->rbuf[6]); rctx->interlaced = (int)((rctx->rbuf[8]>>6)&0x01); has_local_ct = (int)((rctx->rbuf[8]>>7)&0x01); if(has_local_ct) { local_ct_size = (int)(rctx->rbuf[8]&0x07); rctx->colortable.num_entries = 1<<(1+local_ct_size); } if(has_local_ct) { if(!iwgif_read_color_table(rctx,&rctx->colortable)) goto done; } if(rctx->has_transparency) { rctx->colortable.entry[rctx->trans_color_index].a = 0; } if(!iwgif_read(rctx,rctx->rbuf,1)) goto done; root_codesize = (unsigned int)rctx->rbuf[0]; if(root_codesize<2 || root_codesize>11) { iw_set_error(rctx->ctx,"Invalid LZW minimum code size"); goto done; } if(!iwgif_init_screen(rctx)) goto done; rctx->total_npixels = (size_t)rctx->image_width * (size_t)rctx->image_height; if(!iwgif_make_row_pointers(rctx)) goto done; lzw_init(&d,root_codesize); lzw_clear(&d); while(1) { if(!iwgif_read(rctx,rctx->rbuf,1)) goto done; subblocksize = (size_t)rctx->rbuf[0]; if(subblocksize==0) break; if(!iwgif_read(rctx,rctx->rbuf,subblocksize)) goto done; if(!lzw_process_bytes(rctx,&d,rctx->rbuf,subblocksize)) goto done; if(d.eoi_flag) break; if(rctx->pixels_set >= rctx->total_npixels) break; } retval=1; done: return retval; } Commit Message: Fixed a GIF decoding bug (divide by zero) Fixes issue #15 CWE ID: CWE-369
static int iwgif_read_image(struct iwgifrcontext *rctx) { int retval=0; struct lzwdeccontext d; size_t subblocksize; int has_local_ct; int local_ct_size; unsigned int root_codesize; if(!iwgif_read(rctx,rctx->rbuf,9)) goto done; rctx->image_left = (int)iw_get_ui16le(&rctx->rbuf[0]); rctx->image_top = (int)iw_get_ui16le(&rctx->rbuf[2]); rctx->image_width = (int)iw_get_ui16le(&rctx->rbuf[4]); rctx->image_height = (int)iw_get_ui16le(&rctx->rbuf[6]); if(rctx->image_width<1 || rctx->image_height<1) { iw_set_error(rctx->ctx, "Invalid image dimensions"); goto done; } rctx->interlaced = (int)((rctx->rbuf[8]>>6)&0x01); has_local_ct = (int)((rctx->rbuf[8]>>7)&0x01); if(has_local_ct) { local_ct_size = (int)(rctx->rbuf[8]&0x07); rctx->colortable.num_entries = 1<<(1+local_ct_size); } if(has_local_ct) { if(!iwgif_read_color_table(rctx,&rctx->colortable)) goto done; } if(rctx->has_transparency) { rctx->colortable.entry[rctx->trans_color_index].a = 0; } if(!iwgif_read(rctx,rctx->rbuf,1)) goto done; root_codesize = (unsigned int)rctx->rbuf[0]; if(root_codesize<2 || root_codesize>11) { iw_set_error(rctx->ctx,"Invalid LZW minimum code size"); goto done; } if(!iwgif_init_screen(rctx)) goto done; rctx->total_npixels = (size_t)rctx->image_width * (size_t)rctx->image_height; if(!iwgif_make_row_pointers(rctx)) goto done; lzw_init(&d,root_codesize); lzw_clear(&d); while(1) { if(!iwgif_read(rctx,rctx->rbuf,1)) goto done; subblocksize = (size_t)rctx->rbuf[0]; if(subblocksize==0) break; if(!iwgif_read(rctx,rctx->rbuf,subblocksize)) goto done; if(!lzw_process_bytes(rctx,&d,rctx->rbuf,subblocksize)) goto done; if(d.eoi_flag) break; if(rctx->pixels_set >= rctx->total_npixels) break; } retval=1; done: return retval; }
168,231
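The iwgif_read_image fix above rejects zero-sized image dimensions before they reach any later arithmetic, closing the CWE-369 divide-by-zero. The sketch below shows the same validate-before-use idea in plain C; read_u16le, rows_per_strip, and the strip computation are illustrative inventions, not imageworsener code.

#include <stdio.h>
#include <stdint.h>

/* Little-endian 16-bit read from a header buffer. */
static uint16_t read_u16le(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

/* Returns rows-per-strip, or -1 if the header is invalid.  Without the
 * dimension check, a zero width from a malformed file would divide by zero. */
static int rows_per_strip(const uint8_t *header, uint32_t strip_pixels)
{
    uint16_t width  = read_u16le(header);
    uint16_t height = read_u16le(header + 2);

    if (width < 1 || height < 1)      /* reject before any arithmetic */
        return -1;
    return (int)(strip_pixels / width);
}

int main(void)
{
    uint8_t good[4] = { 0x80, 0x00, 0x40, 0x00 };  /* 128 x 64 */
    uint8_t bad[4]  = { 0x00, 0x00, 0x40, 0x00 };  /* 0 x 64   */
    printf("%d %d\n", rows_per_strip(good, 4096), rows_per_strip(bad, 4096));
    return 0;
}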
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: OmniboxPopupViewGtk::OmniboxPopupViewGtk(const gfx::Font& font, OmniboxView* omnibox_view, AutocompleteEditModel* edit_model, GtkWidget* location_bar) : model_(new AutocompletePopupModel(this, edit_model)), omnibox_view_(omnibox_view), location_bar_(location_bar), window_(gtk_window_new(GTK_WINDOW_POPUP)), layout_(NULL), theme_service_(ThemeServiceGtk::GetFrom(edit_model->profile())), font_(font.DeriveFont(kEditFontAdjust)), ignore_mouse_drag_(false), opened_(false) { gtk_widget_set_can_focus(window_, FALSE); gtk_window_set_resizable(GTK_WINDOW(window_), FALSE); gtk_widget_set_app_paintable(window_, TRUE); gtk_widget_set_double_buffered(window_, TRUE); layout_ = gtk_widget_create_pango_layout(window_, NULL); pango_layout_set_auto_dir(layout_, FALSE); pango_layout_set_ellipsize(layout_, PANGO_ELLIPSIZE_END); gtk_widget_add_events(window_, GDK_BUTTON_MOTION_MASK | GDK_POINTER_MOTION_MASK | GDK_BUTTON_PRESS_MASK | GDK_BUTTON_RELEASE_MASK); g_signal_connect(window_, "motion-notify-event", G_CALLBACK(HandleMotionThunk), this); g_signal_connect(window_, "button-press-event", G_CALLBACK(HandleButtonPressThunk), this); g_signal_connect(window_, "button-release-event", G_CALLBACK(HandleButtonReleaseThunk), this); g_signal_connect(window_, "expose-event", G_CALLBACK(HandleExposeThunk), this); registrar_.Add(this, chrome::NOTIFICATION_BROWSER_THEME_CHANGED, content::Source<ThemeService>(theme_service_)); theme_service_->InitThemesFor(this); } Commit Message: GTK: Stop listening to gtk signals in the omnibox before destroying the model. BUG=123530 TEST=none Review URL: http://codereview.chromium.org/10103012 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132498 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-399
OmniboxPopupViewGtk::OmniboxPopupViewGtk(const gfx::Font& font, OmniboxView* omnibox_view, AutocompleteEditModel* edit_model, GtkWidget* location_bar) : signal_registrar_(new ui::GtkSignalRegistrar), model_(new AutocompletePopupModel(this, edit_model)), omnibox_view_(omnibox_view), location_bar_(location_bar), window_(gtk_window_new(GTK_WINDOW_POPUP)), layout_(NULL), theme_service_(ThemeServiceGtk::GetFrom(edit_model->profile())), font_(font.DeriveFont(kEditFontAdjust)), ignore_mouse_drag_(false), opened_(false) { gtk_widget_set_can_focus(window_, FALSE); gtk_window_set_resizable(GTK_WINDOW(window_), FALSE); gtk_widget_set_app_paintable(window_, TRUE); gtk_widget_set_double_buffered(window_, TRUE); layout_ = gtk_widget_create_pango_layout(window_, NULL); pango_layout_set_auto_dir(layout_, FALSE); pango_layout_set_ellipsize(layout_, PANGO_ELLIPSIZE_END); gtk_widget_add_events(window_, GDK_BUTTON_MOTION_MASK | GDK_POINTER_MOTION_MASK | GDK_BUTTON_PRESS_MASK | GDK_BUTTON_RELEASE_MASK); signal_registrar_->Connect(window_, "motion-notify-event", G_CALLBACK(HandleMotionThunk), this); signal_registrar_->Connect(window_, "button-press-event", G_CALLBACK(HandleButtonPressThunk), this); signal_registrar_->Connect(window_, "button-release-event", G_CALLBACK(HandleButtonReleaseThunk), this); signal_registrar_->Connect(window_, "expose-event", G_CALLBACK(HandleExposeThunk), this); registrar_.Add(this, chrome::NOTIFICATION_BROWSER_THEME_CHANGED, content::Source<ThemeService>(theme_service_)); theme_service_->InitThemesFor(this); }
171,048
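The omnibox fix above funnels GTK signal connections through a registrar declared ahead of the popup model, so handlers are disconnected before the object they point at is destroyed. Below is a small C sketch of that disconnect-before-destroy ordering using a toy single-slot callback registry in place of GTK signals; none of these names are GTK or Chromium APIs.

#include <stdio.h>
#include <stdlib.h>

typedef void (*callback_fn)(void *user_data);

/* Toy single-slot registry standing in for a signal connection. */
static callback_fn g_cb;
static void *g_cb_data;

static void connect_cb(callback_fn fn, void *data) { g_cb = fn; g_cb_data = data; }
static void disconnect_cb(void)                     { g_cb = NULL; g_cb_data = NULL; }
static void emit(void)                              { if (g_cb) g_cb(g_cb_data); }

typedef struct { int clicks; } model_t;

static void on_event(void *user_data)
{
    ((model_t *)user_data)->clicks++;
}

int main(void)
{
    model_t *model = malloc(sizeof(*model));
    model->clicks = 0;

    connect_cb(on_event, model);
    emit();                        /* handler runs against a live model */

    disconnect_cb();               /* disconnect BEFORE the model dies... */
    free(model);                   /* ...so a late emit cannot touch freed memory */
    emit();                        /* now a harmless no-op */

    printf("done\n");
    return 0;
}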
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want a response with just the fixed code, no explanation.
Code: static int decode_slice_header(H264Context *h, H264Context *h0) { unsigned int first_mb_in_slice; unsigned int pps_id; int ret; unsigned int slice_type, tmp, i, j; int last_pic_structure, last_pic_droppable; int must_reinit; int needs_reinit = 0; int field_pic_flag, bottom_field_flag; h->me.qpel_put = h->h264qpel.put_h264_qpel_pixels_tab; h->me.qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab; first_mb_in_slice = get_ue_golomb_long(&h->gb); if (first_mb_in_slice == 0) { // FIXME better field boundary detection if (h0->current_slice && FIELD_PICTURE(h)) { field_end(h, 1); } h0->current_slice = 0; if (!h0->first_field) { if (h->cur_pic_ptr && !h->droppable) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); } h->cur_pic_ptr = NULL; } } slice_type = get_ue_golomb_31(&h->gb); if (slice_type > 9) { av_log(h->avctx, AV_LOG_ERROR, "slice type too large (%d) at %d %d\n", slice_type, h->mb_x, h->mb_y); return AVERROR_INVALIDDATA; } if (slice_type > 4) { slice_type -= 5; h->slice_type_fixed = 1; } else h->slice_type_fixed = 0; slice_type = golomb_to_pict_type[slice_type]; h->slice_type = slice_type; h->slice_type_nos = slice_type & 3; h->pict_type = h->slice_type; pps_id = get_ue_golomb(&h->gb); if (pps_id >= MAX_PPS_COUNT) { av_log(h->avctx, AV_LOG_ERROR, "pps_id %d out of range\n", pps_id); return AVERROR_INVALIDDATA; } if (!h0->pps_buffers[pps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", pps_id); return AVERROR_INVALIDDATA; } h->pps = *h0->pps_buffers[pps_id]; if (!h0->sps_buffers[h->pps.sps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", h->pps.sps_id); return AVERROR_INVALIDDATA; } if (h->pps.sps_id != h->current_sps_id || h0->sps_buffers[h->pps.sps_id]->new) { h0->sps_buffers[h->pps.sps_id]->new = 0; h->current_sps_id = h->pps.sps_id; h->sps = *h0->sps_buffers[h->pps.sps_id]; if (h->mb_width != h->sps.mb_width || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) || h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || h->cur_chroma_format_idc != h->sps.chroma_format_idc ) needs_reinit = 1; if (h->bit_depth_luma != h->sps.bit_depth_luma || h->chroma_format_idc != h->sps.chroma_format_idc) { h->bit_depth_luma = h->sps.bit_depth_luma; h->chroma_format_idc = h->sps.chroma_format_idc; needs_reinit = 1; } if ((ret = h264_set_parameter_from_sps(h)) < 0) return ret; } h->avctx->profile = ff_h264_get_profile(&h->sps); h->avctx->level = h->sps.level_idc; h->avctx->refs = h->sps.ref_frame_count; must_reinit = (h->context_initialized && ( 16*h->sps.mb_width != h->avctx->coded_width || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height || h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || h->cur_chroma_format_idc != h->sps.chroma_format_idc || av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio) || h->mb_width != h->sps.mb_width || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) )); if (h0->avctx->pix_fmt != get_pixel_format(h0, 0)) must_reinit = 1; h->mb_width = h->sps.mb_width; h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag); h->mb_num = h->mb_width * h->mb_height; h->mb_stride = h->mb_width + 1; h->b_stride = h->mb_width * 4; h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p h->width = 16 * h->mb_width; h->height = 16 * h->mb_height; ret = init_dimensions(h); if (ret < 0) return ret; if (h->sps.video_signal_type_present_flag) { h->avctx->color_range = h->sps.full_range>0 ? 
AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; if (h->sps.colour_description_present_flag) { if (h->avctx->colorspace != h->sps.colorspace) needs_reinit = 1; h->avctx->color_primaries = h->sps.color_primaries; h->avctx->color_trc = h->sps.color_trc; h->avctx->colorspace = h->sps.colorspace; } } if (h->context_initialized && (h->width != h->avctx->coded_width || h->height != h->avctx->coded_height || must_reinit || needs_reinit)) { if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "changing width/height on " "slice %d\n", h0->current_slice + 1); return AVERROR_INVALIDDATA; } flush_change(h); if ((ret = get_pixel_format(h, 1)) < 0) return ret; h->avctx->pix_fmt = ret; av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, " "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt)); if ((ret = h264_slice_header_init(h, 1)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n"); return ret; } } if (!h->context_initialized) { if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "Cannot (re-)initialize context during parallel decoding.\n"); return AVERROR_PATCHWELCOME; } if ((ret = get_pixel_format(h, 1)) < 0) return ret; h->avctx->pix_fmt = ret; if ((ret = h264_slice_header_init(h, 0)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n"); return ret; } } if (h == h0 && h->dequant_coeff_pps != pps_id) { h->dequant_coeff_pps = pps_id; init_dequant_tables(h); } h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num); h->mb_mbaff = 0; h->mb_aff_frame = 0; last_pic_structure = h0->picture_structure; last_pic_droppable = h0->droppable; h->droppable = h->nal_ref_idc == 0; if (h->sps.frame_mbs_only_flag) { h->picture_structure = PICT_FRAME; } else { if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) { av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n"); return -1; } field_pic_flag = get_bits1(&h->gb); if (field_pic_flag) { bottom_field_flag = get_bits1(&h->gb); h->picture_structure = PICT_TOP_FIELD + bottom_field_flag; } else { h->picture_structure = PICT_FRAME; h->mb_aff_frame = h->sps.mb_aff; } } h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME; if (h0->current_slice != 0) { if (last_pic_structure != h->picture_structure || last_pic_droppable != h->droppable) { av_log(h->avctx, AV_LOG_ERROR, "Changing field mode (%d -> %d) between slices is not allowed\n", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (!h0->cur_pic_ptr) { av_log(h->avctx, AV_LOG_ERROR, "unset cur_pic_ptr on %d. slice\n", h0->current_slice + 1); return AVERROR_INVALIDDATA; } } else { /* Shorten frame num gaps so we don't have to allocate reference * frames just to throw them away */ if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) { int unwrap_prev_frame_num = h->prev_frame_num; int max_frame_num = 1 << h->sps.log2_max_frame_num; if (unwrap_prev_frame_num > h->frame_num) unwrap_prev_frame_num -= max_frame_num; if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) { unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1; if (unwrap_prev_frame_num < 0) unwrap_prev_frame_num += max_frame_num; h->prev_frame_num = unwrap_prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * Here, we're using that to see if we should mark previously * decode frames as "finished". 
* We have to do that before the "dummy" in-between frame allocation, * since that can modify h->cur_pic_ptr. */ if (h0->first_field) { assert(h0->cur_pic_ptr); assert(h0->cur_pic_ptr->f.data[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* Mark old field/frame as completed */ if (!last_pic_droppable && h0->cur_pic_ptr->tf.owner == h0->avctx) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_BOTTOM_FIELD); } /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { if (h0->cur_pic_ptr->frame_num != h->frame_num) { /* This and previous field were reference, but had * different frame_nums. Consider this field first in * pair. Throw away previous field except for reference * purposes. */ if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { /* Second field in complementary pair */ if (!((last_pic_structure == PICT_TOP_FIELD && h->picture_structure == PICT_BOTTOM_FIELD) || (last_pic_structure == PICT_BOTTOM_FIELD && h->picture_structure == PICT_TOP_FIELD))) { av_log(h->avctx, AV_LOG_ERROR, "Invalid field mode combination %d/%d\n", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (last_pic_droppable != h->droppable) { avpriv_request_sample(h->avctx, "Found reference and non-reference fields in the same frame, which"); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_PATCHWELCOME; } } } } while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 && !h0->first_field && h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) { Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL; av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num); if (!h->sps.gaps_in_frame_num_allowed_flag) for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++) h->last_pocs[i] = INT_MIN; ret = h264_frame_start(h); if (ret < 0) return ret; h->prev_frame_num++; h->prev_frame_num %= 1 << h->sps.log2_max_frame_num; h->cur_pic_ptr->frame_num = h->prev_frame_num; ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); ret = ff_generate_sliding_window_mmcos(h, 1); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; ret = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; /* Error concealment: If a ref is missing, copy the previous ref * in its place. * FIXME: Avoiding a memcpy would be nice, but ref handling makes * many assumptions about there being no actual duplicates. * FIXME: This does not copy padding for out-of-frame motion * vectors. Given we are concealing a lost frame, this probably * is not noticeable by comparison, but it should be fixed. 
*/ if (h->short_ref_count) { if (prev) { av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize, (const uint8_t **)prev->f.data, prev->f.linesize, h->avctx->pix_fmt, h->mb_width * 16, h->mb_height * 16); h->short_ref[0]->poc = prev->poc + 2; } h->short_ref[0]->frame_num = h->prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * We're using that to see whether to continue decoding in that * frame, or to allocate a new one. */ if (h0->first_field) { assert(h0->cur_pic_ptr); assert(h0->cur_pic_ptr->f.data[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ h0->cur_pic_ptr = NULL; h0->first_field = FIELD_PICTURE(h); } else { if (h0->cur_pic_ptr->frame_num != h->frame_num) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, h0->picture_structure==PICT_BOTTOM_FIELD); /* This and the previous field had different frame_nums. * Consider this field first in pair. Throw away previous * one except for reference purposes. */ h0->first_field = 1; h0->cur_pic_ptr = NULL; } else { /* Second field in complementary pair */ h0->first_field = 0; } } } else { /* Frame or first field in a potentially complementary pair */ h0->first_field = FIELD_PICTURE(h); } if (!FIELD_PICTURE(h) || h0->first_field) { if (h264_frame_start(h) < 0) { h0->first_field = 0; return AVERROR_INVALIDDATA; } } else { release_unused_pictures(h, 0); } /* Some macroblocks can be accessed before they're available in case * of lost slices, MBAFF or threading. */ if (FIELD_PICTURE(h)) { for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++) memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table)); } else { memset(h->slice_table, -1, (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table)); } h0->last_slice_type = -1; } if (h != h0 && (ret = clone_slice(h, h0)) < 0) return ret; /* can't be in alloc_tables because linesize isn't known there. * FIXME: redo bipred weight to not require extra buffer? 
*/ for (i = 0; i < h->slice_context_count; i++) if (h->thread_context[i]) { ret = alloc_scratch_buffers(h->thread_context[i], h->linesize); if (ret < 0) return ret; } h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup av_assert1(h->mb_num == h->mb_width * h->mb_height); if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num || first_mb_in_slice >= h->mb_num) { av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n"); return AVERROR_INVALIDDATA; } h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width; h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE(h); if (h->picture_structure == PICT_BOTTOM_FIELD) h->resync_mb_y = h->mb_y = h->mb_y + 1; av_assert1(h->mb_y < h->mb_height); if (h->picture_structure == PICT_FRAME) { h->curr_pic_num = h->frame_num; h->max_pic_num = 1 << h->sps.log2_max_frame_num; } else { h->curr_pic_num = 2 * h->frame_num + 1; h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1); } if (h->nal_unit_type == NAL_IDR_SLICE) get_ue_golomb(&h->gb); /* idr_pic_id */ if (h->sps.poc_type == 0) { h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb); if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc_bottom = get_se_golomb(&h->gb); } if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) { h->delta_poc[0] = get_se_golomb(&h->gb); if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc[1] = get_se_golomb(&h->gb); } ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc); if (h->pps.redundant_pic_cnt_present) h->redundant_pic_count = get_ue_golomb(&h->gb); ret = ff_set_ref_count(h); if (ret < 0) return ret; if (slice_type != AV_PICTURE_TYPE_I && (h0->current_slice == 0 || slice_type != h0->last_slice_type || memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) { ff_h264_fill_default_ref_list(h); } if (h->slice_type_nos != AV_PICTURE_TYPE_I) { ret = ff_h264_decode_ref_pic_list_reordering(h); if (ret < 0) { h->ref_count[1] = h->ref_count[0] = 0; return ret; } } if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) || (h->pps.weighted_bipred_idc == 1 && h->slice_type_nos == AV_PICTURE_TYPE_B)) ff_pred_weight_table(h); else if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, -1); } else { h->use_weight = 0; for (i = 0; i < 2; i++) { h->luma_weight_flag[i] = 0; h->chroma_weight_flag[i] = 0; } } if (h->nal_ref_idc) { ret = ff_h264_decode_ref_pic_marking(h0, &h->gb, !(h->avctx->active_thread_type & FF_THREAD_FRAME) || h0->current_slice == 0); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return AVERROR_INVALIDDATA; } if (FRAME_MBAFF(h)) { ff_h264_fill_mbaff_ref_list(h); if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, 0); implicit_weight_table(h, 1); } } if (h->slice_type_nos == AV_PICTURE_TYPE_B && !h->direct_spatial_mv_pred) ff_h264_direct_dist_scale_factor(h); ff_h264_direct_ref_list_init(h); if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) { tmp = get_ue_golomb_31(&h->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n"); return AVERROR_INVALIDDATA; } h->cabac_init_idc = tmp; } h->last_qscale_diff = 0; tmp = h->pps.init_qp + get_se_golomb(&h->gb); if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) { av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); return AVERROR_INVALIDDATA; } h->qscale = tmp; h->chroma_qp[0] = get_chroma_qp(h, 0, 
h->qscale); h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale); if (h->slice_type == AV_PICTURE_TYPE_SP) get_bits1(&h->gb); /* sp_for_switch_flag */ if (h->slice_type == AV_PICTURE_TYPE_SP || h->slice_type == AV_PICTURE_TYPE_SI) get_se_golomb(&h->gb); /* slice_qs_delta */ h->deblocking_filter = 1; h->slice_alpha_c0_offset = 52; h->slice_beta_offset = 52; if (h->pps.deblocking_filter_parameters_present) { tmp = get_ue_golomb_31(&h->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "deblocking_filter_idc %u out of range\n", tmp); return AVERROR_INVALIDDATA; } h->deblocking_filter = tmp; if (h->deblocking_filter < 2) h->deblocking_filter ^= 1; // 1<->0 if (h->deblocking_filter) { h->slice_alpha_c0_offset += get_se_golomb(&h->gb) << 1; h->slice_beta_offset += get_se_golomb(&h->gb) << 1; if (h->slice_alpha_c0_offset > 104U || h->slice_beta_offset > 104U) { av_log(h->avctx, AV_LOG_ERROR, "deblocking filter parameters %d %d out of range\n", h->slice_alpha_c0_offset, h->slice_beta_offset); return AVERROR_INVALIDDATA; } } } if (h->avctx->skip_loop_filter >= AVDISCARD_ALL || (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != AV_PICTURE_TYPE_I) || (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == AV_PICTURE_TYPE_B) || (h->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0)) h->deblocking_filter = 0; if (h->deblocking_filter == 1 && h0->max_contexts > 1) { if (h->avctx->flags2 & CODEC_FLAG2_FAST) { /* Cheat slightly for speed: * Do not bother to deblock across slices. */ h->deblocking_filter = 2; } else { h0->max_contexts = 1; if (!h0->single_decode_warning) { av_log(h->avctx, AV_LOG_INFO, "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n"); h0->single_decode_warning = 1; } if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "Deblocking switched inside frame.\n"); return 1; } } } h->qp_thresh = 15 + 52 - FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) - FFMAX3(0, h->pps.chroma_qp_index_offset[0], h->pps.chroma_qp_index_offset[1]) + 6 * (h->sps.bit_depth_luma - 8); h0->last_slice_type = slice_type; memcpy(h0->last_ref_count, h0->ref_count, sizeof(h0->last_ref_count)); h->slice_num = ++h0->current_slice; if (h->slice_num) h0->slice_row[(h->slice_num-1)&(MAX_SLICES-1)]= h->resync_mb_y; if ( h0->slice_row[h->slice_num&(MAX_SLICES-1)] + 3 >= h->resync_mb_y && h0->slice_row[h->slice_num&(MAX_SLICES-1)] <= h->resync_mb_y && h->slice_num >= MAX_SLICES) { av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", h->slice_num, MAX_SLICES); } for (j = 0; j < 2; j++) { int id_list[16]; int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j]; for (i = 0; i < 16; i++) { id_list[i] = 60; if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) { int k; AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer; for (k = 0; k < h->short_ref_count; k++) if (h->short_ref[k]->f.buf[0]->buffer == buf) { id_list[i] = k; break; } for (k = 0; k < h->long_ref_count; k++) if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) { id_list[i] = h->short_ref_count + k; break; } } } ref2frm[0] = ref2frm[1] = -1; for (i = 0; i < 16; i++) ref2frm[i + 2] = 4 * id_list[i] + (h->ref_list[j][i].reference & 3); ref2frm[18 + 0] = ref2frm[18 + 1] = -1; for (i = 16; i < 48; i++) ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] + (h->ref_list[j][i].reference & 3); } if (h->ref_count[0]) h->er.last_pic = &h->ref_list[0][0]; if (h->ref_count[1]) h->er.next_pic = 
&h->ref_list[1][0]; h->er.ref_count = h->ref_count[0]; if (h->avctx->debug & FF_DEBUG_PICT_INFO) { av_log(h->avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n", h->slice_num, (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"), first_mb_in_slice, av_get_picture_type_char(h->slice_type), h->slice_type_fixed ? " fix" : "", h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "", pps_id, h->frame_num, h->cur_pic_ptr->field_poc[0], h->cur_pic_ptr->field_poc[1], h->ref_count[0], h->ref_count[1], h->qscale, h->deblocking_filter, h->slice_alpha_c0_offset / 2 - 26, h->slice_beta_offset / 2 - 26, h->use_weight, h->use_weight == 1 && h->use_weight_chroma ? "c" : "", h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""); } return 0; } Commit Message: avcodec/h264: do not trust last_pic_droppable when marking pictures as done This simplifies the code and fixes a deadlock Fixes Ticket2927 Signed-off-by: Michael Niedermayer <[email protected]> CWE ID:
static int decode_slice_header(H264Context *h, H264Context *h0) { unsigned int first_mb_in_slice; unsigned int pps_id; int ret; unsigned int slice_type, tmp, i, j; int last_pic_structure, last_pic_droppable; int must_reinit; int needs_reinit = 0; int field_pic_flag, bottom_field_flag; h->me.qpel_put = h->h264qpel.put_h264_qpel_pixels_tab; h->me.qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab; first_mb_in_slice = get_ue_golomb_long(&h->gb); if (first_mb_in_slice == 0) { // FIXME better field boundary detection if (h0->current_slice && FIELD_PICTURE(h)) { field_end(h, 1); } h0->current_slice = 0; if (!h0->first_field) { if (h->cur_pic_ptr && !h->droppable) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); } h->cur_pic_ptr = NULL; } } slice_type = get_ue_golomb_31(&h->gb); if (slice_type > 9) { av_log(h->avctx, AV_LOG_ERROR, "slice type too large (%d) at %d %d\n", slice_type, h->mb_x, h->mb_y); return AVERROR_INVALIDDATA; } if (slice_type > 4) { slice_type -= 5; h->slice_type_fixed = 1; } else h->slice_type_fixed = 0; slice_type = golomb_to_pict_type[slice_type]; h->slice_type = slice_type; h->slice_type_nos = slice_type & 3; h->pict_type = h->slice_type; pps_id = get_ue_golomb(&h->gb); if (pps_id >= MAX_PPS_COUNT) { av_log(h->avctx, AV_LOG_ERROR, "pps_id %d out of range\n", pps_id); return AVERROR_INVALIDDATA; } if (!h0->pps_buffers[pps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", pps_id); return AVERROR_INVALIDDATA; } h->pps = *h0->pps_buffers[pps_id]; if (!h0->sps_buffers[h->pps.sps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", h->pps.sps_id); return AVERROR_INVALIDDATA; } if (h->pps.sps_id != h->current_sps_id || h0->sps_buffers[h->pps.sps_id]->new) { h0->sps_buffers[h->pps.sps_id]->new = 0; h->current_sps_id = h->pps.sps_id; h->sps = *h0->sps_buffers[h->pps.sps_id]; if (h->mb_width != h->sps.mb_width || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) || h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || h->cur_chroma_format_idc != h->sps.chroma_format_idc ) needs_reinit = 1; if (h->bit_depth_luma != h->sps.bit_depth_luma || h->chroma_format_idc != h->sps.chroma_format_idc) { h->bit_depth_luma = h->sps.bit_depth_luma; h->chroma_format_idc = h->sps.chroma_format_idc; needs_reinit = 1; } if ((ret = h264_set_parameter_from_sps(h)) < 0) return ret; } h->avctx->profile = ff_h264_get_profile(&h->sps); h->avctx->level = h->sps.level_idc; h->avctx->refs = h->sps.ref_frame_count; must_reinit = (h->context_initialized && ( 16*h->sps.mb_width != h->avctx->coded_width || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height || h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || h->cur_chroma_format_idc != h->sps.chroma_format_idc || av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio) || h->mb_width != h->sps.mb_width || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) )); if (h0->avctx->pix_fmt != get_pixel_format(h0, 0)) must_reinit = 1; h->mb_width = h->sps.mb_width; h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag); h->mb_num = h->mb_width * h->mb_height; h->mb_stride = h->mb_width + 1; h->b_stride = h->mb_width * 4; h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p h->width = 16 * h->mb_width; h->height = 16 * h->mb_height; ret = init_dimensions(h); if (ret < 0) return ret; if (h->sps.video_signal_type_present_flag) { h->avctx->color_range = h->sps.full_range>0 ? 
AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; if (h->sps.colour_description_present_flag) { if (h->avctx->colorspace != h->sps.colorspace) needs_reinit = 1; h->avctx->color_primaries = h->sps.color_primaries; h->avctx->color_trc = h->sps.color_trc; h->avctx->colorspace = h->sps.colorspace; } } if (h->context_initialized && (h->width != h->avctx->coded_width || h->height != h->avctx->coded_height || must_reinit || needs_reinit)) { if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "changing width/height on " "slice %d\n", h0->current_slice + 1); return AVERROR_INVALIDDATA; } flush_change(h); if ((ret = get_pixel_format(h, 1)) < 0) return ret; h->avctx->pix_fmt = ret; av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, " "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt)); if ((ret = h264_slice_header_init(h, 1)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n"); return ret; } } if (!h->context_initialized) { if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "Cannot (re-)initialize context during parallel decoding.\n"); return AVERROR_PATCHWELCOME; } if ((ret = get_pixel_format(h, 1)) < 0) return ret; h->avctx->pix_fmt = ret; if ((ret = h264_slice_header_init(h, 0)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n"); return ret; } } if (h == h0 && h->dequant_coeff_pps != pps_id) { h->dequant_coeff_pps = pps_id; init_dequant_tables(h); } h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num); h->mb_mbaff = 0; h->mb_aff_frame = 0; last_pic_structure = h0->picture_structure; last_pic_droppable = h0->droppable; h->droppable = h->nal_ref_idc == 0; if (h->sps.frame_mbs_only_flag) { h->picture_structure = PICT_FRAME; } else { if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) { av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n"); return -1; } field_pic_flag = get_bits1(&h->gb); if (field_pic_flag) { bottom_field_flag = get_bits1(&h->gb); h->picture_structure = PICT_TOP_FIELD + bottom_field_flag; } else { h->picture_structure = PICT_FRAME; h->mb_aff_frame = h->sps.mb_aff; } } h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME; if (h0->current_slice != 0) { if (last_pic_structure != h->picture_structure || last_pic_droppable != h->droppable) { av_log(h->avctx, AV_LOG_ERROR, "Changing field mode (%d -> %d) between slices is not allowed\n", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (!h0->cur_pic_ptr) { av_log(h->avctx, AV_LOG_ERROR, "unset cur_pic_ptr on %d. slice\n", h0->current_slice + 1); return AVERROR_INVALIDDATA; } } else { /* Shorten frame num gaps so we don't have to allocate reference * frames just to throw them away */ if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) { int unwrap_prev_frame_num = h->prev_frame_num; int max_frame_num = 1 << h->sps.log2_max_frame_num; if (unwrap_prev_frame_num > h->frame_num) unwrap_prev_frame_num -= max_frame_num; if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) { unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1; if (unwrap_prev_frame_num < 0) unwrap_prev_frame_num += max_frame_num; h->prev_frame_num = unwrap_prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * Here, we're using that to see if we should mark previously * decode frames as "finished". 
* We have to do that before the "dummy" in-between frame allocation, * since that can modify h->cur_pic_ptr. */ if (h0->first_field) { assert(h0->cur_pic_ptr); assert(h0->cur_pic_ptr->f.data[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* Mark old field/frame as completed */ if (h0->cur_pic_ptr->tf.owner == h0->avctx) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_BOTTOM_FIELD); } /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ if (last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { if (h0->cur_pic_ptr->frame_num != h->frame_num) { /* This and previous field were reference, but had * different frame_nums. Consider this field first in * pair. Throw away previous field except for reference * purposes. */ if (last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { /* Second field in complementary pair */ if (!((last_pic_structure == PICT_TOP_FIELD && h->picture_structure == PICT_BOTTOM_FIELD) || (last_pic_structure == PICT_BOTTOM_FIELD && h->picture_structure == PICT_TOP_FIELD))) { av_log(h->avctx, AV_LOG_ERROR, "Invalid field mode combination %d/%d\n", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (last_pic_droppable != h->droppable) { avpriv_request_sample(h->avctx, "Found reference and non-reference fields in the same frame, which"); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_PATCHWELCOME; } } } } while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 && !h0->first_field && h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) { Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL; av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num); if (!h->sps.gaps_in_frame_num_allowed_flag) for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++) h->last_pocs[i] = INT_MIN; ret = h264_frame_start(h); if (ret < 0) return ret; h->prev_frame_num++; h->prev_frame_num %= 1 << h->sps.log2_max_frame_num; h->cur_pic_ptr->frame_num = h->prev_frame_num; ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); ret = ff_generate_sliding_window_mmcos(h, 1); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; ret = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; /* Error concealment: If a ref is missing, copy the previous ref * in its place. * FIXME: Avoiding a memcpy would be nice, but ref handling makes * many assumptions about there being no actual duplicates. * FIXME: This does not copy padding for out-of-frame motion * vectors. Given we are concealing a lost frame, this probably * is not noticeable by comparison, but it should be fixed. 
*/ if (h->short_ref_count) { if (prev) { av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize, (const uint8_t **)prev->f.data, prev->f.linesize, h->avctx->pix_fmt, h->mb_width * 16, h->mb_height * 16); h->short_ref[0]->poc = prev->poc + 2; } h->short_ref[0]->frame_num = h->prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * We're using that to see whether to continue decoding in that * frame, or to allocate a new one. */ if (h0->first_field) { assert(h0->cur_pic_ptr); assert(h0->cur_pic_ptr->f.data[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ h0->cur_pic_ptr = NULL; h0->first_field = FIELD_PICTURE(h); } else { if (h0->cur_pic_ptr->frame_num != h->frame_num) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, h0->picture_structure==PICT_BOTTOM_FIELD); /* This and the previous field had different frame_nums. * Consider this field first in pair. Throw away previous * one except for reference purposes. */ h0->first_field = 1; h0->cur_pic_ptr = NULL; } else { /* Second field in complementary pair */ h0->first_field = 0; } } } else { /* Frame or first field in a potentially complementary pair */ h0->first_field = FIELD_PICTURE(h); } if (!FIELD_PICTURE(h) || h0->first_field) { if (h264_frame_start(h) < 0) { h0->first_field = 0; return AVERROR_INVALIDDATA; } } else { release_unused_pictures(h, 0); } /* Some macroblocks can be accessed before they're available in case * of lost slices, MBAFF or threading. */ if (FIELD_PICTURE(h)) { for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++) memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table)); } else { memset(h->slice_table, -1, (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table)); } h0->last_slice_type = -1; } if (h != h0 && (ret = clone_slice(h, h0)) < 0) return ret; /* can't be in alloc_tables because linesize isn't known there. * FIXME: redo bipred weight to not require extra buffer? 
*/ for (i = 0; i < h->slice_context_count; i++) if (h->thread_context[i]) { ret = alloc_scratch_buffers(h->thread_context[i], h->linesize); if (ret < 0) return ret; } h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup av_assert1(h->mb_num == h->mb_width * h->mb_height); if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num || first_mb_in_slice >= h->mb_num) { av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n"); return AVERROR_INVALIDDATA; } h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width; h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE(h); if (h->picture_structure == PICT_BOTTOM_FIELD) h->resync_mb_y = h->mb_y = h->mb_y + 1; av_assert1(h->mb_y < h->mb_height); if (h->picture_structure == PICT_FRAME) { h->curr_pic_num = h->frame_num; h->max_pic_num = 1 << h->sps.log2_max_frame_num; } else { h->curr_pic_num = 2 * h->frame_num + 1; h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1); } if (h->nal_unit_type == NAL_IDR_SLICE) get_ue_golomb(&h->gb); /* idr_pic_id */ if (h->sps.poc_type == 0) { h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb); if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc_bottom = get_se_golomb(&h->gb); } if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) { h->delta_poc[0] = get_se_golomb(&h->gb); if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc[1] = get_se_golomb(&h->gb); } ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc); if (h->pps.redundant_pic_cnt_present) h->redundant_pic_count = get_ue_golomb(&h->gb); ret = ff_set_ref_count(h); if (ret < 0) return ret; if (slice_type != AV_PICTURE_TYPE_I && (h0->current_slice == 0 || slice_type != h0->last_slice_type || memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) { ff_h264_fill_default_ref_list(h); } if (h->slice_type_nos != AV_PICTURE_TYPE_I) { ret = ff_h264_decode_ref_pic_list_reordering(h); if (ret < 0) { h->ref_count[1] = h->ref_count[0] = 0; return ret; } } if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) || (h->pps.weighted_bipred_idc == 1 && h->slice_type_nos == AV_PICTURE_TYPE_B)) ff_pred_weight_table(h); else if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, -1); } else { h->use_weight = 0; for (i = 0; i < 2; i++) { h->luma_weight_flag[i] = 0; h->chroma_weight_flag[i] = 0; } } if (h->nal_ref_idc) { ret = ff_h264_decode_ref_pic_marking(h0, &h->gb, !(h->avctx->active_thread_type & FF_THREAD_FRAME) || h0->current_slice == 0); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return AVERROR_INVALIDDATA; } if (FRAME_MBAFF(h)) { ff_h264_fill_mbaff_ref_list(h); if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, 0); implicit_weight_table(h, 1); } } if (h->slice_type_nos == AV_PICTURE_TYPE_B && !h->direct_spatial_mv_pred) ff_h264_direct_dist_scale_factor(h); ff_h264_direct_ref_list_init(h); if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) { tmp = get_ue_golomb_31(&h->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n"); return AVERROR_INVALIDDATA; } h->cabac_init_idc = tmp; } h->last_qscale_diff = 0; tmp = h->pps.init_qp + get_se_golomb(&h->gb); if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) { av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); return AVERROR_INVALIDDATA; } h->qscale = tmp; h->chroma_qp[0] = get_chroma_qp(h, 0, 
h->qscale); h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale); if (h->slice_type == AV_PICTURE_TYPE_SP) get_bits1(&h->gb); /* sp_for_switch_flag */ if (h->slice_type == AV_PICTURE_TYPE_SP || h->slice_type == AV_PICTURE_TYPE_SI) get_se_golomb(&h->gb); /* slice_qs_delta */ h->deblocking_filter = 1; h->slice_alpha_c0_offset = 52; h->slice_beta_offset = 52; if (h->pps.deblocking_filter_parameters_present) { tmp = get_ue_golomb_31(&h->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "deblocking_filter_idc %u out of range\n", tmp); return AVERROR_INVALIDDATA; } h->deblocking_filter = tmp; if (h->deblocking_filter < 2) h->deblocking_filter ^= 1; // 1<->0 if (h->deblocking_filter) { h->slice_alpha_c0_offset += get_se_golomb(&h->gb) << 1; h->slice_beta_offset += get_se_golomb(&h->gb) << 1; if (h->slice_alpha_c0_offset > 104U || h->slice_beta_offset > 104U) { av_log(h->avctx, AV_LOG_ERROR, "deblocking filter parameters %d %d out of range\n", h->slice_alpha_c0_offset, h->slice_beta_offset); return AVERROR_INVALIDDATA; } } } if (h->avctx->skip_loop_filter >= AVDISCARD_ALL || (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->slice_type_nos != AV_PICTURE_TYPE_I) || (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == AV_PICTURE_TYPE_B) || (h->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0)) h->deblocking_filter = 0; if (h->deblocking_filter == 1 && h0->max_contexts > 1) { if (h->avctx->flags2 & CODEC_FLAG2_FAST) { /* Cheat slightly for speed: * Do not bother to deblock across slices. */ h->deblocking_filter = 2; } else { h0->max_contexts = 1; if (!h0->single_decode_warning) { av_log(h->avctx, AV_LOG_INFO, "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n"); h0->single_decode_warning = 1; } if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "Deblocking switched inside frame.\n"); return 1; } } } h->qp_thresh = 15 + 52 - FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) - FFMAX3(0, h->pps.chroma_qp_index_offset[0], h->pps.chroma_qp_index_offset[1]) + 6 * (h->sps.bit_depth_luma - 8); h0->last_slice_type = slice_type; memcpy(h0->last_ref_count, h0->ref_count, sizeof(h0->last_ref_count)); h->slice_num = ++h0->current_slice; if (h->slice_num) h0->slice_row[(h->slice_num-1)&(MAX_SLICES-1)]= h->resync_mb_y; if ( h0->slice_row[h->slice_num&(MAX_SLICES-1)] + 3 >= h->resync_mb_y && h0->slice_row[h->slice_num&(MAX_SLICES-1)] <= h->resync_mb_y && h->slice_num >= MAX_SLICES) { av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", h->slice_num, MAX_SLICES); } for (j = 0; j < 2; j++) { int id_list[16]; int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j]; for (i = 0; i < 16; i++) { id_list[i] = 60; if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) { int k; AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer; for (k = 0; k < h->short_ref_count; k++) if (h->short_ref[k]->f.buf[0]->buffer == buf) { id_list[i] = k; break; } for (k = 0; k < h->long_ref_count; k++) if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) { id_list[i] = h->short_ref_count + k; break; } } } ref2frm[0] = ref2frm[1] = -1; for (i = 0; i < 16; i++) ref2frm[i + 2] = 4 * id_list[i] + (h->ref_list[j][i].reference & 3); ref2frm[18 + 0] = ref2frm[18 + 1] = -1; for (i = 16; i < 48; i++) ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] + (h->ref_list[j][i].reference & 3); } if (h->ref_count[0]) h->er.last_pic = &h->ref_list[0][0]; if (h->ref_count[1]) h->er.next_pic = 
&h->ref_list[1][0]; h->er.ref_count = h->ref_count[0]; if (h->avctx->debug & FF_DEBUG_PICT_INFO) { av_log(h->avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n", h->slice_num, (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"), first_mb_in_slice, av_get_picture_type_char(h->slice_type), h->slice_type_fixed ? " fix" : "", h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "", pps_id, h->frame_num, h->cur_pic_ptr->field_poc[0], h->cur_pic_ptr->field_poc[1], h->ref_count[0], h->ref_count[1], h->qscale, h->deblocking_filter, h->slice_alpha_c0_offset / 2 - 26, h->slice_beta_offset / 2 - 26, h->use_weight, h->use_weight == 1 && h->use_weight_chroma ? "c" : "", h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""); } return 0; }
165,932
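The two flattened listings above (vulnerable input and fixed output) differ only in the first-field progress-reporting block near the middle of decode_slice_header(). Extracted for readability (both fragments appear verbatim in the record), the change is that the fixed code no longer gates ff_thread_report_progress() on !last_pic_droppable, which per the commit message is what could leave a field unreported and deadlock a frame-threaded decoder (Ticket2927):

    /* Vulnerable: progress reported only for non-droppable previous pictures. */
    if (!last_pic_droppable && h0->cur_pic_ptr->tf.owner == h0->avctx) {
        ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
                                  last_pic_structure == PICT_BOTTOM_FIELD);
    }
    /* ... */
    if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
        ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
                                  last_pic_structure == PICT_TOP_FIELD);
    }

    /* Fixed: the droppable test is removed, so completion is always reported
     * for pictures owned by this decoding context. */
    if (h0->cur_pic_ptr->tf.owner == h0->avctx) {
        ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
                                  last_pic_structure == PICT_BOTTOM_FIELD);
    }
    /* ... */
    if (last_pic_structure != PICT_FRAME) {
        ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
                                  last_pic_structure == PICT_TOP_FIELD);
    }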
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: void PixelBufferRasterWorkerPool::OnRasterTasksFinished() { if (!should_notify_client_if_no_tasks_are_pending_) return; CheckForCompletedRasterTasks(); } Commit Message: cc: Simplify raster task completion notification logic (Relanding after missing activation bug fixed in https://codereview.chromium.org/131763003/) Previously the pixel buffer raster worker pool used a combination of polling and explicit notifications from the raster worker pool to decide when to tell the client about the completion of 1) all tasks or 2) the subset of tasks required for activation. This patch simplifies the logic by only triggering the notification based on the OnRasterTasksFinished and OnRasterTasksRequiredForActivationFinished calls from the worker pool. BUG=307841,331534 Review URL: https://codereview.chromium.org/99873007 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@243991 0039d316-1c4b-4281-b951-d872f2087c98 CWE ID: CWE-20
void PixelBufferRasterWorkerPool::OnRasterTasksFinished() { if (!should_notify_client_if_no_tasks_are_pending_) return; raster_finished_task_pending_ = false; CheckForCompletedRasterTasks(); }
171,260
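The one-line fix above clears raster_finished_task_pending_ when the "raster tasks finished" callback fires; per the commit message, client notification is then driven purely by the OnRasterTasksFinished / OnRasterTasksRequiredForActivationFinished callbacks instead of polling. A minimal C-style sketch of that flag pattern, assuming a simplified shape for CheckForCompletedRasterTasks (the real class in cc/ is C++ and considerably more involved):

    #include <stdbool.h>
    #include <stdio.h>

    struct raster_pool {
        bool should_notify_client_if_no_tasks_are_pending;
        bool raster_finished_task_pending;
        bool raster_required_for_activation_finished_task_pending;
    };

    /* Notify the client only once every outstanding "finished" signal has arrived. */
    static void check_for_completed_raster_tasks(struct raster_pool *p)
    {
        if (!p->raster_finished_task_pending &&
            !p->raster_required_for_activation_finished_task_pending)
            printf("notify client: no raster tasks pending\n");
    }

    static void on_raster_tasks_finished(struct raster_pool *p)
    {
        if (!p->should_notify_client_if_no_tasks_are_pending)
            return;
        p->raster_finished_task_pending = false;   /* the line added by the fix */
        check_for_completed_raster_tasks(p);
    }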
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: static int sock_close(struct inode *inode, struct file *filp) { sock_release(SOCKET_I(inode)); return 0; } Commit Message: socket: close race condition between sock_close() and sockfs_setattr() fchownat() doesn't even hold refcnt of fd until it figures out fd is really needed (otherwise is ignored) and releases it after it resolves the path. This means sock_close() could race with sockfs_setattr(), which leads to a NULL pointer dereference since typically we set sock->sk to NULL in ->release(). As pointed out by Al, this is unique to sockfs. So we can fix this in socket layer by acquiring inode_lock in sock_close() and checking against NULL in sockfs_setattr(). sock_release() is called in many places, only the sock_close() path matters here. And fortunately, this should not affect normal sock_close() as it is only called when the last fd refcnt is gone. It only affects sock_close() with a parallel sockfs_setattr() in progress, which is not common. Fixes: 86741ec25462 ("net: core: Add a UID field to struct sock.") Reported-by: shankarapailoor <[email protected]> Cc: Tetsuo Handa <[email protected]> Cc: Lorenzo Colitti <[email protected]> Cc: Al Viro <[email protected]> Signed-off-by: Cong Wang <[email protected]> Signed-off-by: David S. Miller <[email protected]> CWE ID: CWE-362
static int sock_close(struct inode *inode, struct file *filp) { __sock_release(SOCKET_I(inode), inode); return 0; }
169,203
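The fixed sock_close() above simply forwards to __sock_release(SOCKET_I(inode), inode); the point of passing the inode, per the commit message, is to let the socket layer take the inode lock around ->release() so a concurrent sockfs_setattr() either runs before sock->sk is cleared or sees NULL and bails out. The record does not show those two functions, so the bodies below are an assumption-based sketch of that locking scheme, not the verbatim net/socket.c code:

    /* Sketch only: names follow the commit message; the real
     * __sock_release()/sockfs_setattr() implementations may differ. */
    static void __sock_release(struct socket *sock, struct inode *inode)
    {
        if (sock->ops) {
            if (inode)
                inode_lock(inode);        /* serialize with sockfs_setattr() */
            sock->ops->release(sock);     /* typically leaves sock->sk == NULL */
            if (inode)
                inode_unlock(inode);
            sock->ops = NULL;
        }
        /* module refcount / fd teardown elided */
    }

    static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
    {
        int err = simple_setattr(d_inode(dentry), iattr);

        if (!err && (iattr->ia_valid & ATTR_UID)) {
            struct socket *sock = SOCKET_I(d_inode(dentry));

            if (sock->sk)                 /* NULL check paired with the lock above */
                sock->sk->sk_uid = iattr->ia_uid;
            else
                err = -ENOENT;
        }
        return err;
    }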
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: is_link_trusted (NautilusFile *file, gboolean is_launcher) { GFile *location; gboolean res; if (!is_launcher) { return TRUE; } if (nautilus_file_can_execute (file)) { return TRUE; } res = FALSE; if (nautilus_file_is_local (file)) { location = nautilus_file_get_location (file); res = nautilus_is_in_system_dir (location); g_object_unref (location); } return res; } Commit Message: mime-actions: use file metadata for trusting desktop files Currently we only trust desktop files that have the executable bit set, and don't replace the displayed icon or the displayed name until it's trusted, which prevents for running random programs by a malicious desktop file. However, the executable permission is preserved if the desktop file comes from a compressed file. To prevent this, add a metadata::trusted metadata to the file once the user acknowledges the file as trusted. This adds metadata to the file, which cannot be added unless it has access to the computer. Also remove the SHEBANG "trusted" content we were putting inside the desktop file, since that doesn't add more security since it can come with the file itself. https://bugzilla.gnome.org/show_bug.cgi?id=777991 CWE ID: CWE-20
is_link_trusted (NautilusFile *file, gboolean is_launcher) { GFile *location; gboolean res; g_autofree gchar* trusted = NULL; if (!is_launcher) { return TRUE; } trusted = nautilus_file_get_metadata (file, NAUTILUS_METADATA_KEY_DESKTOP_FILE_TRUSTED, NULL); if (nautilus_file_can_execute (file) && trusted != NULL) { return TRUE; } res = FALSE; if (nautilus_file_is_local (file)) { location = nautilus_file_get_location (file); res = nautilus_is_in_system_dir (location); g_object_unref (location); } return res; }
167,746
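In the fixed is_link_trusted() above, a launcher is trusted only when it is executable and carries a metadata::trusted key, which (as the commit message notes) can only have been written by someone with access to the machine. The write side is not part of the record; a hypothetical sketch, assuming the public nautilus_file_set_metadata() helper and an arbitrary non-NULL value (the check above only tests for non-NULL):

    /* Hypothetical "mark as trusted" side; everything except the metadata key
     * and nautilus_file_set_metadata() is an assumption for illustration. */
    static void
    mark_desktop_file_trusted (NautilusFile *file)
    {
        nautilus_file_set_metadata (file,
                                    NAUTILUS_METADATA_KEY_DESKTOP_FILE_TRUSTED,
                                    NULL,    /* default value */
                                    "yes");  /* any non-NULL string satisfies the check */
        /* The executable bit still has to be set separately for
         * nautilus_file_can_execute() to return TRUE. */
    }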
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: image_transform_png_set_@_add(image_transform *this, PNG_CONST image_transform **that, char *name, size_t sizeof_name, size_t *pos, png_byte colour_type, png_byte bit_depth) { this->next = *that; *that = this; *pos = safecat(name, sizeof_name, *pos, " +@"); return 1; } Commit Message: DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82) CWE ID:
image_transform_png_set_@_add(image_transform *this,
173,600
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: png_set_filter(png_structp png_ptr, int method, int filters) { png_debug(1, "in png_set_filter"); if (png_ptr == NULL) return; #ifdef PNG_MNG_FEATURES_SUPPORTED if ((png_ptr->mng_features_permitted & PNG_FLAG_MNG_FILTER_64) && (method == PNG_INTRAPIXEL_DIFFERENCING)) method = PNG_FILTER_TYPE_BASE; #endif if (method == PNG_FILTER_TYPE_BASE) { switch (filters & (PNG_ALL_FILTERS | 0x07)) { #ifdef PNG_WRITE_FILTER_SUPPORTED case 5: case 6: case 7: png_warning(png_ptr, "Unknown row filter for method 0"); #endif /* PNG_WRITE_FILTER_SUPPORTED */ case PNG_FILTER_VALUE_NONE: png_ptr->do_filter = PNG_FILTER_NONE; break; #ifdef PNG_WRITE_FILTER_SUPPORTED case PNG_FILTER_VALUE_SUB: png_ptr->do_filter = PNG_FILTER_SUB; break; case PNG_FILTER_VALUE_UP: png_ptr->do_filter = PNG_FILTER_UP; break; case PNG_FILTER_VALUE_AVG: png_ptr->do_filter = PNG_FILTER_AVG; break; case PNG_FILTER_VALUE_PAETH: png_ptr->do_filter = PNG_FILTER_PAETH; break; default: png_ptr->do_filter = (png_byte)filters; break; #else default: png_warning(png_ptr, "Unknown row filter for method 0"); #endif /* PNG_WRITE_FILTER_SUPPORTED */ } /* If we have allocated the row_buf, this means we have already started * with the image and we should have allocated all of the filter buffers * that have been selected. If prev_row isn't already allocated, then * it is too late to start using the filters that need it, since we * will be missing the data in the previous row. If an application * wants to start and stop using particular filters during compression, * it should start out with all of the filters, and then add and * remove them after the start of compression. */ if (png_ptr->row_buf != NULL) { #ifdef PNG_WRITE_FILTER_SUPPORTED if ((png_ptr->do_filter & PNG_FILTER_SUB) && png_ptr->sub_row == NULL) { png_ptr->sub_row = (png_bytep)png_malloc(png_ptr, (png_ptr->rowbytes + 1)); png_ptr->sub_row[0] = PNG_FILTER_VALUE_SUB; } if ((png_ptr->do_filter & PNG_FILTER_UP) && png_ptr->up_row == NULL) { if (png_ptr->prev_row == NULL) { png_warning(png_ptr, "Can't add Up filter after starting"); png_ptr->do_filter &= ~PNG_FILTER_UP; } else { png_ptr->up_row = (png_bytep)png_malloc(png_ptr, (png_ptr->rowbytes + 1)); png_ptr->up_row[0] = PNG_FILTER_VALUE_UP; } } if ((png_ptr->do_filter & PNG_FILTER_AVG) && png_ptr->avg_row == NULL) { if (png_ptr->prev_row == NULL) { png_warning(png_ptr, "Can't add Average filter after starting"); png_ptr->do_filter &= ~PNG_FILTER_AVG; } else { png_ptr->avg_row = (png_bytep)png_malloc(png_ptr, (png_ptr->rowbytes + 1)); png_ptr->avg_row[0] = PNG_FILTER_VALUE_AVG; } } if ((png_ptr->do_filter & PNG_FILTER_PAETH) && png_ptr->paeth_row == NULL) { if (png_ptr->prev_row == NULL) { png_warning(png_ptr, "Can't add Paeth filter after starting"); png_ptr->do_filter &= (png_byte)(~PNG_FILTER_PAETH); } else { png_ptr->paeth_row = (png_bytep)png_malloc(png_ptr, (png_ptr->rowbytes + 1)); png_ptr->paeth_row[0] = PNG_FILTER_VALUE_PAETH; } } if (png_ptr->do_filter == PNG_NO_FILTERS) #endif /* PNG_WRITE_FILTER_SUPPORTED */ png_ptr->do_filter = PNG_FILTER_NONE; } } else png_error(png_ptr, "Unknown custom filter method"); } Commit Message: third_party/libpng: update to 1.2.54 [email protected] BUG=560291 Review URL: https://codereview.chromium.org/1467263003 Cr-Commit-Position: refs/heads/master@{#362298} CWE ID: CWE-119
png_set_filter(png_structp png_ptr, int method, int filters) { png_debug(1, "in png_set_filter"); if (png_ptr == NULL) return; #ifdef PNG_MNG_FEATURES_SUPPORTED if ((png_ptr->mng_features_permitted & PNG_FLAG_MNG_FILTER_64) && (method == PNG_INTRAPIXEL_DIFFERENCING)) method = PNG_FILTER_TYPE_BASE; #endif if (method == PNG_FILTER_TYPE_BASE) { switch (filters & (PNG_ALL_FILTERS | 0x07)) { case PNG_FILTER_VALUE_NONE: png_ptr->do_filter = PNG_FILTER_NONE; break; #ifdef PNG_WRITE_FILTER_SUPPORTED case 5: case 6: case 7: png_warning(png_ptr, "Unknown row filter for method 0"); break; case PNG_FILTER_VALUE_SUB: png_ptr->do_filter = PNG_FILTER_SUB; break; case PNG_FILTER_VALUE_UP: png_ptr->do_filter = PNG_FILTER_UP; break; case PNG_FILTER_VALUE_AVG: png_ptr->do_filter = PNG_FILTER_AVG; break; case PNG_FILTER_VALUE_PAETH: png_ptr->do_filter = PNG_FILTER_PAETH; break; default: png_ptr->do_filter = (png_byte)filters; break; #else default: png_warning(png_ptr, "Unknown row filter for method 0"); break; #endif /* PNG_WRITE_FILTER_SUPPORTED */ } /* If we have allocated the row_buf, this means we have already started * with the image and we should have allocated all of the filter buffers * that have been selected. If prev_row isn't already allocated, then * it is too late to start using the filters that need it, since we * will be missing the data in the previous row. If an application * wants to start and stop using particular filters during compression, * it should start out with all of the filters, and then add and * remove them after the start of compression. */ if (png_ptr->row_buf != NULL) { #ifdef PNG_WRITE_FILTER_SUPPORTED if ((png_ptr->do_filter & PNG_FILTER_SUB) && png_ptr->sub_row == NULL) { png_ptr->sub_row = (png_bytep)png_malloc(png_ptr, (png_ptr->rowbytes + 1)); png_ptr->sub_row[0] = PNG_FILTER_VALUE_SUB; } if ((png_ptr->do_filter & PNG_FILTER_UP) && png_ptr->up_row == NULL) { if (png_ptr->prev_row == NULL) { png_warning(png_ptr, "Can't add Up filter after starting"); png_ptr->do_filter &= ~PNG_FILTER_UP; } else { png_ptr->up_row = (png_bytep)png_malloc(png_ptr, (png_ptr->rowbytes + 1)); png_ptr->up_row[0] = PNG_FILTER_VALUE_UP; } } if ((png_ptr->do_filter & PNG_FILTER_AVG) && png_ptr->avg_row == NULL) { if (png_ptr->prev_row == NULL) { png_warning(png_ptr, "Can't add Average filter after starting"); png_ptr->do_filter &= ~PNG_FILTER_AVG; } else { png_ptr->avg_row = (png_bytep)png_malloc(png_ptr, (png_ptr->rowbytes + 1)); png_ptr->avg_row[0] = PNG_FILTER_VALUE_AVG; } } if ((png_ptr->do_filter & PNG_FILTER_PAETH) && png_ptr->paeth_row == NULL) { if (png_ptr->prev_row == NULL) { png_warning(png_ptr, "Can't add Paeth filter after starting"); png_ptr->do_filter &= (png_byte)(~PNG_FILTER_PAETH); } else { png_ptr->paeth_row = (png_bytep)png_malloc(png_ptr, (png_ptr->rowbytes + 1)); png_ptr->paeth_row[0] = PNG_FILTER_VALUE_PAETH; } } if (png_ptr->do_filter == PNG_NO_FILTERS) #endif /* PNG_WRITE_FILTER_SUPPORTED */ png_ptr->do_filter = PNG_FILTER_NONE; } } else png_error(png_ptr, "Unknown custom filter method"); }
172,187
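A detail worth noting in the record above: in the vulnerable png_set_filter() the out-of-range cases 5/6/7 only emitted a warning and then fell straight through into the PNG_FILTER_VALUE_NONE case, whereas the fixed switch reorders the cases and adds a break after each warning. For callers, the third argument is either a single filter value (0-4) or an OR of PNG_FILTER_* flags, which is exactly what the masked switch distinguishes; a minimal usage sketch (not from the record):

    #include <png.h>

    /* Restrict the encoder to the None filter, the safe default the switch
     * falls back to. */
    static void restrict_to_none_filter(png_structp png_ptr)
    {
        png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_NONE);
    }

    /* Or select several adaptive filters with a flag mask: */
    static void allow_sub_and_up(png_structp png_ptr)
    {
        png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_SUB | PNG_FILTER_UP);
    }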
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation.
Code: int main (int argc, char *argv[]) {
  int objectsCount = 0;
  Guint numOffset = 0;
  std::vector<Object> pages;
  std::vector<Guint> offsets;
  XRef *yRef, *countRef;
  FILE *f;
  OutStream *outStr;
  int i;
  int j, rootNum;
  std::vector<PDFDoc *>docs;
  int majorVersion = 0;
  int minorVersion = 0;
  char *fileName = argv[argc - 1];
  int exitCode;

  exitCode = 99;
  const GBool ok = parseArgs (argDesc, &argc, argv);
  if (!ok || argc < 3 || printVersion || printHelp) {
    fprintf(stderr, "pdfunite version %s\n", PACKAGE_VERSION);
    fprintf(stderr, "%s\n", popplerCopyright);
    fprintf(stderr, "%s\n", xpdfCopyright);
    if (!printVersion) {
      printUsage("pdfunite", "<PDF-sourcefile-1>..<PDF-sourcefile-n> <PDF-destfile>", argDesc);
    }
    if (printVersion || printHelp)
      exitCode = 0;
    return exitCode;
  }
  exitCode = 0;
  globalParams = new GlobalParams();

  for (i = 1; i < argc - 1; i++) {
    GooString *gfileName = new GooString(argv[i]);
    PDFDoc *doc = new PDFDoc(gfileName, NULL, NULL, NULL);
    if (doc->isOk() && !doc->isEncrypted()) {
      docs.push_back(doc);
      if (doc->getPDFMajorVersion() > majorVersion) {
        majorVersion = doc->getPDFMajorVersion();
        minorVersion = doc->getPDFMinorVersion();
      } else if (doc->getPDFMajorVersion() == majorVersion) {
        if (doc->getPDFMinorVersion() > minorVersion) {
          minorVersion = doc->getPDFMinorVersion();
        }
      }
    } else if (doc->isOk()) {
      error(errUnimplemented, -1, "Could not merge encrypted files ('{0:s}')", argv[i]);
      return -1;
    } else {
      error(errSyntaxError, -1, "Could not merge damaged documents ('{0:s}')", argv[i]);
      return -1;
    }
  }

  if (!(f = fopen(fileName, "wb"))) {
    error(errIO, -1, "Could not open file '{0:s}'", fileName);
    return -1;
  }
  outStr = new FileOutStream(f, 0);

  yRef = new XRef();
  countRef = new XRef();
  yRef->add(0, 65535, 0, gFalse);
  PDFDoc::writeHeader(outStr, majorVersion, minorVersion);

  Object intents;
  Object afObj;
  Object ocObj;
  Object names;
  if (docs.size() >= 1) {
    Object catObj;
    docs[0]->getXRef()->getCatalog(&catObj);
    Dict *catDict = catObj.getDict();
    catDict->lookup("OutputIntents", &intents);
    catDict->lookupNF("AcroForm", &afObj);
    Ref *refPage = docs[0]->getCatalog()->getPageRef(1);
    if (!afObj.isNull()) {
      docs[0]->markAcroForm(&afObj, yRef, countRef, 0, refPage->num, refPage->num);
    }
    catDict->lookupNF("OCProperties", &ocObj);
    if (!ocObj.isNull() && ocObj.isDict()) {
      docs[0]->markPageObjects(ocObj.getDict(), yRef, countRef, 0, refPage->num, refPage->num);
    }
    catDict->lookup("Names", &names);
    if (!names.isNull() && names.isDict()) {
      docs[0]->markPageObjects(names.getDict(), yRef, countRef, 0, refPage->num, refPage->num);
    }
    if (intents.isArray() && intents.arrayGetLength() > 0) {
      for (i = 1; i < (int) docs.size(); i++) {
        Object pagecatObj, pageintents;
        docs[i]->getXRef()->getCatalog(&pagecatObj);
        Dict *pagecatDict = pagecatObj.getDict();
        pagecatDict->lookup("OutputIntents", &pageintents);
        if (pageintents.isArray() && pageintents.arrayGetLength() > 0) {
          for (j = intents.arrayGetLength() - 1; j >= 0; j--) {
            Object intent;
            intents.arrayGet(j, &intent, 0);
            if (intent.isDict()) {
              Object idf;
              intent.dictLookup("OutputConditionIdentifier", &idf);
              if (idf.isString()) {
                GooString *gidf = idf.getString();
                GBool removeIntent = gTrue;
                for (int k = 0; k < pageintents.arrayGetLength(); k++) {
                  Object pgintent;
                  pageintents.arrayGet(k, &pgintent, 0);
                  if (pgintent.isDict()) {
                    Object pgidf;
                    pgintent.dictLookup("OutputConditionIdentifier", &pgidf);
                    if (pgidf.isString()) {
                      GooString *gpgidf = pgidf.getString();
                      if (gpgidf->cmp(gidf) == 0) {
                        pgidf.free();
                        removeIntent = gFalse;
                        break;
                      }
                    }
                    pgidf.free();
                  }
                }
                if (removeIntent) {
                  intents.arrayRemove(j);
                  error(errSyntaxWarning, -1, "Output intent {0:s} missing in pdf {1:s}, removed", gidf->getCString(), docs[i]->getFileName()->getCString());
                }
              } else {
                intents.arrayRemove(j);
                error(errSyntaxWarning, -1, "Invalid output intent dict, missing required OutputConditionIdentifier");
              }
              idf.free();
            } else {
              intents.arrayRemove(j);
            }
            intent.free();
          }
        } else {
          error(errSyntaxWarning, -1, "Output intents differs, remove them all");
          intents.free();
          break;
        }
        pagecatObj.free();
        pageintents.free();
      }
    }
    if (intents.isArray() && intents.arrayGetLength() > 0) {
      for (j = intents.arrayGetLength() - 1; j >= 0; j--) {
        Object intent;
        intents.arrayGet(j, &intent, 0);
        if (intent.isDict()) {
          docs[0]->markPageObjects(intent.getDict(), yRef, countRef, numOffset, 0, 0);
        } else {
          intents.arrayRemove(j);
        }
        intent.free();
      }
    }
    catObj.free();
  }

  for (i = 0; i < (int) docs.size(); i++) {
    for (j = 1; j <= docs[i]->getNumPages(); j++) {
      PDFRectangle *cropBox = NULL;
      if (docs[i]->getCatalog()->getPage(j)->isCropped())
        cropBox = docs[i]->getCatalog()->getPage(j)->getCropBox();
      Object page;
      docs[i]->getXRef()->fetch(refPage->num, refPage->gen, &page);
      Dict *pageDict = page.getDict();
      Dict *resDict = docs[i]->getCatalog()->getPage(j)->getResourceDict();
      if (resDict) {
        Object *newResource = new Object();
        newResource->initDict(resDict);
        pageDict->set("Resources", newResource);
        delete newResource;
      }
      pages.push_back(page);
      offsets.push_back(numOffset);
      docs[i]->markPageObjects(pageDict, yRef, countRef, numOffset, refPage->num, refPage->num);
      Object annotsObj;
      pageDict->lookupNF("Annots", &annotsObj);
      if (!annotsObj.isNull()) {
        docs[i]->markAnnotations(&annotsObj, yRef, countRef, numOffset, refPage->num, refPage->num);
        annotsObj.free();
      }
    }
    Object pageCatObj, pageNames, pageForm;
    docs[i]->getXRef()->getCatalog(&pageCatObj);
    Dict *pageCatDict = pageCatObj.getDict();
    pageCatDict->lookup("Names", &pageNames);
    if (!pageNames.isNull() && pageNames.isDict()) {
      if (!names.isDict()) {
        names.free();
        names.initDict(yRef);
      }
      doMergeNameDict(docs[i], yRef, countRef, 0, 0, names.getDict(), pageNames.getDict(), numOffset);
    }
    pageCatDict->lookup("AcroForm", &pageForm);
    if (i > 0 && !pageForm.isNull() && pageForm.isDict()) {
      if (afObj.isNull()) {
        pageCatDict->lookupNF("AcroForm", &afObj);
      } else if (afObj.isDict()) {
        doMergeFormDict(afObj.getDict(), pageForm.getDict(), numOffset);
      }
    }
    pageForm.free();
    pageNames.free();
    pageCatObj.free();
    objectsCount += docs[i]->writePageObjects(outStr, yRef, numOffset, gTrue);
    numOffset = yRef->getNumObjects() + 1;
  }

  rootNum = yRef->getNumObjects() + 1;
  yRef->add(rootNum, 0, outStr->getPos(), gTrue);
  outStr->printf("%d 0 obj\n", rootNum);
  outStr->printf("<< /Type /Catalog /Pages %d 0 R", rootNum + 1);
  if (intents.isArray() && intents.arrayGetLength() > 0) {
    outStr->printf(" /OutputIntents [");
    for (j = 0; j < intents.arrayGetLength(); j++) {
      Object intent;
      intents.arrayGet(j, &intent, 0);
      if (intent.isDict()) {
        PDFDoc::writeObject(&intent, outStr, yRef, 0, NULL, cryptRC4, 0, 0, 0);
      }
      intent.free();
    }
    outStr->printf("]");
  }
  intents.free();
  if (!afObj.isNull()) {
    outStr->printf(" /AcroForm ");
    PDFDoc::writeObject(&afObj, outStr, yRef, 0, NULL, cryptRC4, 0, 0, 0);
    afObj.free();
  }
  if (!ocObj.isNull() && ocObj.isDict()) {
    outStr->printf(" /OCProperties ");
    PDFDoc::writeObject(&ocObj, outStr, yRef, 0, NULL, cryptRC4, 0, 0, 0);
    ocObj.free();
  }
  if (!names.isNull() && names.isDict()) {
    outStr->printf(" /Names ");
    PDFDoc::writeObject(&names, outStr, yRef, 0, NULL, cryptRC4, 0, 0, 0);
    names.free();
  }
  outStr->printf(">>\nendobj\n");
  objectsCount++;
  yRef->add(rootNum + 1, 0, outStr->getPos(), gTrue);
  outStr->printf("%d 0 obj\n", rootNum + 1);
  outStr->printf("<< /Type /Pages /Kids [");
  for (j = 0; j < (int) pages.size(); j++)
    outStr->printf(" %d 0 R", rootNum + j + 2);
  outStr->printf(" ] /Count %zd >>\nendobj\n", pages.size());
  objectsCount++;
  for (i = 0; i < (int) pages.size(); i++) {
    yRef->add(rootNum + i + 2, 0, outStr->getPos(), gTrue);
    outStr->printf("%d 0 obj\n", rootNum + i + 2);
    outStr->printf("<< ");
    Dict *pageDict = pages[i].getDict();
    for (j = 0; j < pageDict->getLength(); j++) {
      if (j > 0)
        outStr->printf(" ");
      const char *key = pageDict->getKey(j);
      Object value;
      pageDict->getValNF(j, &value);
      if (strcmp(key, "Parent") == 0) {
        outStr->printf("/Parent %d 0 R", rootNum + 1);
      } else {
        outStr->printf("/%s ", key);
        PDFDoc::writeObject(&value, outStr, yRef, offsets[i], NULL, cryptRC4, 0, 0, 0);
      }
      value.free();
    }
    outStr->printf(" >>\nendobj\n");
    objectsCount++;
  }
  Goffset uxrefOffset = outStr->getPos();
  Ref ref;
  ref.num = rootNum;
  ref.gen = 0;
  Dict *trailerDict = PDFDoc::createTrailerDict(objectsCount, gFalse, 0, &ref, yRef, fileName, outStr->getPos());
  PDFDoc::writeXRefTableTrailer(trailerDict, yRef, gTrue, // write all entries according to ISO 32000-1, 7.5.4 Cross-Reference Table: "For a file that has never been incrementally updated, the cross-reference section shall contain only one subsection, whose object numbering begins at 0."
                                uxrefOffset, outStr, yRef);
  delete trailerDict;

  outStr->close();
  delete outStr;
  fclose(f);
  delete yRef;
  delete countRef;
  for (j = 0; j < (int) pages.size (); j++) pages[j].free();
  for (i = 0; i < (int) docs.size (); i++) delete docs[i];
  delete globalParams;
  return exitCode;
}

Commit Message:
CWE ID: CWE-476
int main (int argc, char *argv[]) {
  int objectsCount = 0;
  Guint numOffset = 0;
  std::vector<Object> pages;
  std::vector<Guint> offsets;
  XRef *yRef, *countRef;
  FILE *f;
  OutStream *outStr;
  int i;
  int j, rootNum;
  std::vector<PDFDoc *>docs;
  int majorVersion = 0;
  int minorVersion = 0;
  char *fileName = argv[argc - 1];
  int exitCode;

  exitCode = 99;
  const GBool ok = parseArgs (argDesc, &argc, argv);
  if (!ok || argc < 3 || printVersion || printHelp) {
    fprintf(stderr, "pdfunite version %s\n", PACKAGE_VERSION);
    fprintf(stderr, "%s\n", popplerCopyright);
    fprintf(stderr, "%s\n", xpdfCopyright);
    if (!printVersion) {
      printUsage("pdfunite", "<PDF-sourcefile-1>..<PDF-sourcefile-n> <PDF-destfile>", argDesc);
    }
    if (printVersion || printHelp)
      exitCode = 0;
    return exitCode;
  }
  exitCode = 0;
  globalParams = new GlobalParams();

  for (i = 1; i < argc - 1; i++) {
    GooString *gfileName = new GooString(argv[i]);
    PDFDoc *doc = new PDFDoc(gfileName, NULL, NULL, NULL);
    if (doc->isOk() && !doc->isEncrypted()) {
      docs.push_back(doc);
      if (doc->getPDFMajorVersion() > majorVersion) {
        majorVersion = doc->getPDFMajorVersion();
        minorVersion = doc->getPDFMinorVersion();
      } else if (doc->getPDFMajorVersion() == majorVersion) {
        if (doc->getPDFMinorVersion() > minorVersion) {
          minorVersion = doc->getPDFMinorVersion();
        }
      }
    } else if (doc->isOk()) {
      error(errUnimplemented, -1, "Could not merge encrypted files ('{0:s}')", argv[i]);
      return -1;
    } else {
      error(errSyntaxError, -1, "Could not merge damaged documents ('{0:s}')", argv[i]);
      return -1;
    }
  }

  if (!(f = fopen(fileName, "wb"))) {
    error(errIO, -1, "Could not open file '{0:s}'", fileName);
    return -1;
  }
  outStr = new FileOutStream(f, 0);

  yRef = new XRef();
  countRef = new XRef();
  yRef->add(0, 65535, 0, gFalse);
  PDFDoc::writeHeader(outStr, majorVersion, minorVersion);

  Object intents;
  Object afObj;
  Object ocObj;
  Object names;
  if (docs.size() >= 1) {
    Object catObj;
    docs[0]->getXRef()->getCatalog(&catObj);
    Dict *catDict = catObj.getDict();
    catDict->lookup("OutputIntents", &intents);
    catDict->lookupNF("AcroForm", &afObj);
    Ref *refPage = docs[0]->getCatalog()->getPageRef(1);
    if (!afObj.isNull() && refPage) {
      docs[0]->markAcroForm(&afObj, yRef, countRef, 0, refPage->num, refPage->num);
    }
    catDict->lookupNF("OCProperties", &ocObj);
    if (!ocObj.isNull() && ocObj.isDict() && refPage) {
      docs[0]->markPageObjects(ocObj.getDict(), yRef, countRef, 0, refPage->num, refPage->num);
    }
    catDict->lookup("Names", &names);
    if (!names.isNull() && names.isDict() && refPage) {
      docs[0]->markPageObjects(names.getDict(), yRef, countRef, 0, refPage->num, refPage->num);
    }
    if (intents.isArray() && intents.arrayGetLength() > 0) {
      for (i = 1; i < (int) docs.size(); i++) {
        Object pagecatObj, pageintents;
        docs[i]->getXRef()->getCatalog(&pagecatObj);
        Dict *pagecatDict = pagecatObj.getDict();
        pagecatDict->lookup("OutputIntents", &pageintents);
        if (pageintents.isArray() && pageintents.arrayGetLength() > 0) {
          for (j = intents.arrayGetLength() - 1; j >= 0; j--) {
            Object intent;
            intents.arrayGet(j, &intent, 0);
            if (intent.isDict()) {
              Object idf;
              intent.dictLookup("OutputConditionIdentifier", &idf);
              if (idf.isString()) {
                GooString *gidf = idf.getString();
                GBool removeIntent = gTrue;
                for (int k = 0; k < pageintents.arrayGetLength(); k++) {
                  Object pgintent;
                  pageintents.arrayGet(k, &pgintent, 0);
                  if (pgintent.isDict()) {
                    Object pgidf;
                    pgintent.dictLookup("OutputConditionIdentifier", &pgidf);
                    if (pgidf.isString()) {
                      GooString *gpgidf = pgidf.getString();
                      if (gpgidf->cmp(gidf) == 0) {
                        pgidf.free();
                        removeIntent = gFalse;
                        break;
                      }
                    }
                    pgidf.free();
                  }
                }
                if (removeIntent) {
                  intents.arrayRemove(j);
                  error(errSyntaxWarning, -1, "Output intent {0:s} missing in pdf {1:s}, removed", gidf->getCString(), docs[i]->getFileName()->getCString());
                }
              } else {
                intents.arrayRemove(j);
                error(errSyntaxWarning, -1, "Invalid output intent dict, missing required OutputConditionIdentifier");
              }
              idf.free();
            } else {
              intents.arrayRemove(j);
            }
            intent.free();
          }
        } else {
          error(errSyntaxWarning, -1, "Output intents differs, remove them all");
          intents.free();
          break;
        }
        pagecatObj.free();
        pageintents.free();
      }
    }
    if (intents.isArray() && intents.arrayGetLength() > 0) {
      for (j = intents.arrayGetLength() - 1; j >= 0; j--) {
        Object intent;
        intents.arrayGet(j, &intent, 0);
        if (intent.isDict()) {
          docs[0]->markPageObjects(intent.getDict(), yRef, countRef, numOffset, 0, 0);
        } else {
          intents.arrayRemove(j);
        }
        intent.free();
      }
    }
    catObj.free();
  }

  for (i = 0; i < (int) docs.size(); i++) {
    for (j = 1; j <= docs[i]->getNumPages(); j++) {
      if (!docs[i]->getCatalog()->getPage(j)) {
        continue;
      }
      PDFRectangle *cropBox = NULL;
      if (docs[i]->getCatalog()->getPage(j)->isCropped())
        cropBox = docs[i]->getCatalog()->getPage(j)->getCropBox();
      Object page;
      docs[i]->getXRef()->fetch(refPage->num, refPage->gen, &page);
      Dict *pageDict = page.getDict();
      Dict *resDict = docs[i]->getCatalog()->getPage(j)->getResourceDict();
      if (resDict) {
        Object *newResource = new Object();
        newResource->initDict(resDict);
        pageDict->set("Resources", newResource);
        delete newResource;
      }
      pages.push_back(page);
      offsets.push_back(numOffset);
      docs[i]->markPageObjects(pageDict, yRef, countRef, numOffset, refPage->num, refPage->num);
      Object annotsObj;
      pageDict->lookupNF("Annots", &annotsObj);
      if (!annotsObj.isNull()) {
        docs[i]->markAnnotations(&annotsObj, yRef, countRef, numOffset, refPage->num, refPage->num);
        annotsObj.free();
      }
    }
    Object pageCatObj, pageNames, pageForm;
    docs[i]->getXRef()->getCatalog(&pageCatObj);
    Dict *pageCatDict = pageCatObj.getDict();
    pageCatDict->lookup("Names", &pageNames);
    if (!pageNames.isNull() && pageNames.isDict()) {
      if (!names.isDict()) {
        names.free();
        names.initDict(yRef);
      }
      doMergeNameDict(docs[i], yRef, countRef, 0, 0, names.getDict(), pageNames.getDict(), numOffset);
    }
    pageCatDict->lookup("AcroForm", &pageForm);
    if (i > 0 && !pageForm.isNull() && pageForm.isDict()) {
      if (afObj.isNull()) {
        pageCatDict->lookupNF("AcroForm", &afObj);
      } else if (afObj.isDict()) {
        doMergeFormDict(afObj.getDict(), pageForm.getDict(), numOffset);
      }
    }
    pageForm.free();
    pageNames.free();
    pageCatObj.free();
    objectsCount += docs[i]->writePageObjects(outStr, yRef, numOffset, gTrue);
    numOffset = yRef->getNumObjects() + 1;
  }

  rootNum = yRef->getNumObjects() + 1;
  yRef->add(rootNum, 0, outStr->getPos(), gTrue);
  outStr->printf("%d 0 obj\n", rootNum);
  outStr->printf("<< /Type /Catalog /Pages %d 0 R", rootNum + 1);
  if (intents.isArray() && intents.arrayGetLength() > 0) {
    outStr->printf(" /OutputIntents [");
    for (j = 0; j < intents.arrayGetLength(); j++) {
      Object intent;
      intents.arrayGet(j, &intent, 0);
      if (intent.isDict()) {
        PDFDoc::writeObject(&intent, outStr, yRef, 0, NULL, cryptRC4, 0, 0, 0);
      }
      intent.free();
    }
    outStr->printf("]");
  }
  intents.free();
  if (!afObj.isNull()) {
    outStr->printf(" /AcroForm ");
    PDFDoc::writeObject(&afObj, outStr, yRef, 0, NULL, cryptRC4, 0, 0, 0);
    afObj.free();
  }
  if (!ocObj.isNull() && ocObj.isDict()) {
    outStr->printf(" /OCProperties ");
    PDFDoc::writeObject(&ocObj, outStr, yRef, 0, NULL, cryptRC4, 0, 0, 0);
    ocObj.free();
  }
  if (!names.isNull() && names.isDict()) {
    outStr->printf(" /Names ");
    PDFDoc::writeObject(&names, outStr, yRef, 0, NULL, cryptRC4, 0, 0, 0);
    names.free();
  }
  outStr->printf(">>\nendobj\n");
  objectsCount++;
  yRef->add(rootNum + 1, 0, outStr->getPos(), gTrue);
  outStr->printf("%d 0 obj\n", rootNum + 1);
  outStr->printf("<< /Type /Pages /Kids [");
  for (j = 0; j < (int) pages.size(); j++)
    outStr->printf(" %d 0 R", rootNum + j + 2);
  outStr->printf(" ] /Count %zd >>\nendobj\n", pages.size());
  objectsCount++;
  for (i = 0; i < (int) pages.size(); i++) {
    yRef->add(rootNum + i + 2, 0, outStr->getPos(), gTrue);
    outStr->printf("%d 0 obj\n", rootNum + i + 2);
    outStr->printf("<< ");
    Dict *pageDict = pages[i].getDict();
    for (j = 0; j < pageDict->getLength(); j++) {
      if (j > 0)
        outStr->printf(" ");
      const char *key = pageDict->getKey(j);
      Object value;
      pageDict->getValNF(j, &value);
      if (strcmp(key, "Parent") == 0) {
        outStr->printf("/Parent %d 0 R", rootNum + 1);
      } else {
        outStr->printf("/%s ", key);
        PDFDoc::writeObject(&value, outStr, yRef, offsets[i], NULL, cryptRC4, 0, 0, 0);
      }
      value.free();
    }
    outStr->printf(" >>\nendobj\n");
    objectsCount++;
  }
  Goffset uxrefOffset = outStr->getPos();
  Ref ref;
  ref.num = rootNum;
  ref.gen = 0;
  Dict *trailerDict = PDFDoc::createTrailerDict(objectsCount, gFalse, 0, &ref, yRef, fileName, outStr->getPos());
  PDFDoc::writeXRefTableTrailer(trailerDict, yRef, gTrue, // write all entries according to ISO 32000-1, 7.5.4 Cross-Reference Table: "For a file that has never been incrementally updated, the cross-reference section shall contain only one subsection, whose object numbering begins at 0."
                                uxrefOffset, outStr, yRef);
  delete trailerDict;

  outStr->close();
  delete outStr;
  fclose(f);
  delete yRef;
  delete countRef;
  for (j = 0; j < (int) pages.size (); j++) pages[j].free();
  for (i = 0; i < (int) docs.size (); i++) delete docs[i];
  delete globalParams;
  return exitCode;
}
164890