instruction (stringclasses, 1 value) | input (stringlengths 90 – 139k) | output (stringlengths 16 – 138k) | __index_level_0__ (int64, 165k – 175k) |
---|---|---|---|
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void BluetoothDeviceChooserController::GetDevice(
blink::mojom::WebBluetoothRequestDeviceOptionsPtr options,
const SuccessCallback& success_callback,
const ErrorCallback& error_callback) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
DCHECK(success_callback_.is_null());
DCHECK(error_callback_.is_null());
success_callback_ = success_callback;
error_callback_ = error_callback;
options_ = std::move(options);
LogRequestDeviceOptions(options_);
if (options_->filters &&
BluetoothBlocklist::Get().IsExcluded(options_->filters.value())) {
RecordRequestDeviceOutcome(
UMARequestDeviceOutcome::BLOCKLISTED_SERVICE_IN_FILTER);
PostErrorCallback(
blink::mojom::WebBluetoothResult::REQUEST_DEVICE_WITH_BLOCKLISTED_UUID);
return;
}
BluetoothBlocklist::Get().RemoveExcludedUUIDs(options_.get());
const url::Origin requesting_origin =
render_frame_host_->GetLastCommittedOrigin();
const url::Origin embedding_origin =
web_contents_->GetMainFrame()->GetLastCommittedOrigin();
if (!embedding_origin.IsSameOriginWith(requesting_origin)) {
PostErrorCallback(blink::mojom::WebBluetoothResult::
REQUEST_DEVICE_FROM_CROSS_ORIGIN_IFRAME);
return;
}
DCHECK(!requesting_origin.opaque());
if (!adapter_->IsPresent()) {
DVLOG(1) << "Bluetooth Adapter not present. Can't serve requestDevice.";
RecordRequestDeviceOutcome(
UMARequestDeviceOutcome::BLUETOOTH_ADAPTER_NOT_PRESENT);
PostErrorCallback(blink::mojom::WebBluetoothResult::NO_BLUETOOTH_ADAPTER);
return;
}
switch (GetContentClient()->browser()->AllowWebBluetooth(
web_contents_->GetBrowserContext(), requesting_origin,
embedding_origin)) {
case ContentBrowserClient::AllowWebBluetoothResult::BLOCK_POLICY: {
RecordRequestDeviceOutcome(
UMARequestDeviceOutcome::BLUETOOTH_CHOOSER_POLICY_DISABLED);
PostErrorCallback(blink::mojom::WebBluetoothResult::
CHOOSER_NOT_SHOWN_API_LOCALLY_DISABLED);
return;
}
case ContentBrowserClient::AllowWebBluetoothResult::
BLOCK_GLOBALLY_DISABLED: {
web_contents_->GetMainFrame()->AddMessageToConsole(
blink::mojom::ConsoleMessageLevel::kInfo,
"Bluetooth permission has been blocked.");
RecordRequestDeviceOutcome(
UMARequestDeviceOutcome::BLUETOOTH_GLOBALLY_DISABLED);
PostErrorCallback(blink::mojom::WebBluetoothResult::
CHOOSER_NOT_SHOWN_API_GLOBALLY_DISABLED);
return;
}
case ContentBrowserClient::AllowWebBluetoothResult::ALLOW:
break;
}
BluetoothChooser::EventHandler chooser_event_handler =
base::Bind(&BluetoothDeviceChooserController::OnBluetoothChooserEvent,
base::Unretained(this));
if (WebContentsDelegate* delegate = web_contents_->GetDelegate()) {
chooser_ = delegate->RunBluetoothChooser(render_frame_host_,
std::move(chooser_event_handler));
}
if (!chooser_.get()) {
PostErrorCallback(
blink::mojom::WebBluetoothResult::WEB_BLUETOOTH_NOT_SUPPORTED);
return;
}
if (!chooser_->CanAskForScanningPermission()) {
DVLOG(1) << "Closing immediately because Chooser cannot obtain permission.";
OnBluetoothChooserEvent(BluetoothChooser::Event::DENIED_PERMISSION,
"" /* device_address */);
return;
}
device_ids_.clear();
PopulateConnectedDevices();
if (!chooser_.get()) {
return;
}
if (!adapter_->IsPowered()) {
chooser_->SetAdapterPresence(
BluetoothChooser::AdapterPresence::POWERED_OFF);
return;
}
StartDeviceDiscovery();
}
Commit Message: bluetooth: Implement getAvailability()
This change implements the getAvailability() method for
navigator.bluetooth as defined in the specification.
Bug: 707640
Change-Id: I9e9b3e7f8ea7f259e975f71cb6d9570e5f04b479
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1651516
Reviewed-by: Chris Harrelson <[email protected]>
Reviewed-by: Giovanni Ortuño Urquidi <[email protected]>
Reviewed-by: Kinuko Yasuda <[email protected]>
Commit-Queue: Ovidio de Jesús Ruiz-Henríquez <[email protected]>
Auto-Submit: Ovidio de Jesús Ruiz-Henríquez <[email protected]>
Cr-Commit-Position: refs/heads/master@{#688987}
CWE ID: CWE-119 | void BluetoothDeviceChooserController::GetDevice(
blink::mojom::WebBluetoothRequestDeviceOptionsPtr options,
const SuccessCallback& success_callback,
const ErrorCallback& error_callback) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
DCHECK(success_callback_.is_null());
DCHECK(error_callback_.is_null());
success_callback_ = success_callback;
error_callback_ = error_callback;
options_ = std::move(options);
LogRequestDeviceOptions(options_);
if (options_->filters &&
BluetoothBlocklist::Get().IsExcluded(options_->filters.value())) {
RecordRequestDeviceOutcome(
UMARequestDeviceOutcome::BLOCKLISTED_SERVICE_IN_FILTER);
PostErrorCallback(WebBluetoothResult::REQUEST_DEVICE_WITH_BLOCKLISTED_UUID);
return;
}
BluetoothBlocklist::Get().RemoveExcludedUUIDs(options_.get());
WebBluetoothResult allow_result =
web_bluetooth_service_->GetBluetoothAllowed();
if (allow_result != WebBluetoothResult::SUCCESS) {
switch (allow_result) {
case WebBluetoothResult::CHOOSER_NOT_SHOWN_API_LOCALLY_DISABLED: {
RecordRequestDeviceOutcome(
UMARequestDeviceOutcome::BLUETOOTH_CHOOSER_POLICY_DISABLED);
break;
}
case WebBluetoothResult::CHOOSER_NOT_SHOWN_API_GLOBALLY_DISABLED: {
// Log to the developer console.
web_contents_->GetMainFrame()->AddMessageToConsole(
blink::mojom::ConsoleMessageLevel::kInfo,
"Bluetooth permission has been blocked.");
// Block requests.
RecordRequestDeviceOutcome(
UMARequestDeviceOutcome::BLUETOOTH_GLOBALLY_DISABLED);
break;
}
default:
break;
}
PostErrorCallback(allow_result);
return;
}
if (!adapter_->IsPresent()) {
DVLOG(1) << "Bluetooth Adapter not present. Can't serve requestDevice.";
RecordRequestDeviceOutcome(
UMARequestDeviceOutcome::BLUETOOTH_ADAPTER_NOT_PRESENT);
PostErrorCallback(WebBluetoothResult::NO_BLUETOOTH_ADAPTER);
return;
}
BluetoothChooser::EventHandler chooser_event_handler =
base::Bind(&BluetoothDeviceChooserController::OnBluetoothChooserEvent,
base::Unretained(this));
if (WebContentsDelegate* delegate = web_contents_->GetDelegate()) {
chooser_ = delegate->RunBluetoothChooser(render_frame_host_,
std::move(chooser_event_handler));
}
if (!chooser_.get()) {
PostErrorCallback(WebBluetoothResult::WEB_BLUETOOTH_NOT_SUPPORTED);
return;
}
if (!chooser_->CanAskForScanningPermission()) {
DVLOG(1) << "Closing immediately because Chooser cannot obtain permission.";
OnBluetoothChooserEvent(BluetoothChooser::Event::DENIED_PERMISSION,
"" /* device_address */);
return;
}
device_ids_.clear();
PopulateConnectedDevices();
if (!chooser_.get()) {
return;
}
if (!adapter_->IsPowered()) {
chooser_->SetAdapterPresence(
BluetoothChooser::AdapterPresence::POWERED_OFF);
return;
}
StartDeviceDiscovery();
}
| 172,443 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: long Block::Parse(const Cluster* pCluster) {
if (pCluster == NULL)
return -1;
if (pCluster->m_pSegment == NULL)
return -1;
assert(m_start >= 0);
assert(m_size >= 0);
assert(m_track <= 0);
assert(m_frames == NULL);
assert(m_frame_count <= 0);
long long pos = m_start;
const long long stop = m_start + m_size;
long len;
IMkvReader* const pReader = pCluster->m_pSegment->m_pReader;
m_track = ReadUInt(pReader, pos, len);
if (m_track <= 0)
return E_FILE_FORMAT_INVALID;
if ((pos + len) > stop)
return E_FILE_FORMAT_INVALID;
pos += len; // consume track number
if ((stop - pos) < 2)
return E_FILE_FORMAT_INVALID;
long status;
long long value;
status = UnserializeInt(pReader, pos, 2, value);
if (status)
return E_FILE_FORMAT_INVALID;
if (value < SHRT_MIN)
return E_FILE_FORMAT_INVALID;
if (value > SHRT_MAX)
return E_FILE_FORMAT_INVALID;
m_timecode = static_cast<short>(value);
pos += 2;
if ((stop - pos) <= 0)
return E_FILE_FORMAT_INVALID;
status = pReader->Read(pos, 1, &m_flags);
if (status)
return E_FILE_FORMAT_INVALID;
const int lacing = int(m_flags & 0x06) >> 1;
++pos; // consume flags byte
if (lacing == 0) { // no lacing
if (pos > stop)
return E_FILE_FORMAT_INVALID;
m_frame_count = 1;
m_frames = new Frame[m_frame_count];
Frame& f = m_frames[0];
f.pos = pos;
const long long frame_size = stop - pos;
if (frame_size > LONG_MAX)
return E_FILE_FORMAT_INVALID;
f.len = static_cast<long>(frame_size);
return 0; // success
}
if (pos >= stop)
return E_FILE_FORMAT_INVALID;
unsigned char biased_count;
status = pReader->Read(pos, 1, &biased_count);
if (status)
return E_FILE_FORMAT_INVALID;
++pos; // consume frame count
assert(pos <= stop);
m_frame_count = int(biased_count) + 1;
m_frames = new Frame[m_frame_count];
assert(m_frames);
if (lacing == 1) { // Xiph
Frame* pf = m_frames;
Frame* const pf_end = pf + m_frame_count;
long size = 0;
int frame_count = m_frame_count;
while (frame_count > 1) {
long frame_size = 0;
for (;;) {
unsigned char val;
if (pos >= stop)
return E_FILE_FORMAT_INVALID;
status = pReader->Read(pos, 1, &val);
if (status)
return E_FILE_FORMAT_INVALID;
++pos; // consume xiph size byte
frame_size += val;
if (val < 255)
break;
}
Frame& f = *pf++;
assert(pf < pf_end);
f.pos = 0; // patch later
f.len = frame_size;
size += frame_size; // contribution of this frame
--frame_count;
}
assert(pf < pf_end);
assert(pos <= stop);
{
Frame& f = *pf++;
if (pf != pf_end)
return E_FILE_FORMAT_INVALID;
f.pos = 0; // patch later
const long long total_size = stop - pos;
if (total_size < size)
return E_FILE_FORMAT_INVALID;
const long long frame_size = total_size - size;
if (frame_size > LONG_MAX)
return E_FILE_FORMAT_INVALID;
f.len = static_cast<long>(frame_size);
}
pf = m_frames;
while (pf != pf_end) {
Frame& f = *pf++;
assert((pos + f.len) <= stop);
f.pos = pos;
pos += f.len;
}
assert(pos == stop);
} else if (lacing == 2) { // fixed-size lacing
const long long total_size = stop - pos;
if ((total_size % m_frame_count) != 0)
return E_FILE_FORMAT_INVALID;
const long long frame_size = total_size / m_frame_count;
if (frame_size > LONG_MAX)
return E_FILE_FORMAT_INVALID;
Frame* pf = m_frames;
Frame* const pf_end = pf + m_frame_count;
while (pf != pf_end) {
assert((pos + frame_size) <= stop);
Frame& f = *pf++;
f.pos = pos;
f.len = static_cast<long>(frame_size);
pos += frame_size;
}
assert(pos == stop);
} else {
assert(lacing == 3); // EBML lacing
if (pos >= stop)
return E_FILE_FORMAT_INVALID;
long size = 0;
int frame_count = m_frame_count;
long long frame_size = ReadUInt(pReader, pos, len);
if (frame_size < 0)
return E_FILE_FORMAT_INVALID;
if (frame_size > LONG_MAX)
return E_FILE_FORMAT_INVALID;
if ((pos + len) > stop)
return E_FILE_FORMAT_INVALID;
pos += len; // consume length of size of first frame
if ((pos + frame_size) > stop)
return E_FILE_FORMAT_INVALID;
Frame* pf = m_frames;
Frame* const pf_end = pf + m_frame_count;
{
Frame& curr = *pf;
curr.pos = 0; // patch later
curr.len = static_cast<long>(frame_size);
size += curr.len; // contribution of this frame
}
--frame_count;
while (frame_count > 1) {
if (pos >= stop)
return E_FILE_FORMAT_INVALID;
assert(pf < pf_end);
const Frame& prev = *pf++;
assert(prev.len == frame_size);
if (prev.len != frame_size)
return E_FILE_FORMAT_INVALID;
assert(pf < pf_end);
Frame& curr = *pf;
curr.pos = 0; // patch later
const long long delta_size_ = ReadUInt(pReader, pos, len);
if (delta_size_ < 0)
return E_FILE_FORMAT_INVALID;
if ((pos + len) > stop)
return E_FILE_FORMAT_INVALID;
pos += len; // consume length of (delta) size
assert(pos <= stop);
const int exp = 7 * len - 1;
const long long bias = (1LL << exp) - 1LL;
const long long delta_size = delta_size_ - bias;
frame_size += delta_size;
if (frame_size < 0)
return E_FILE_FORMAT_INVALID;
if (frame_size > LONG_MAX)
return E_FILE_FORMAT_INVALID;
curr.len = static_cast<long>(frame_size);
size += curr.len; // contribution of this frame
--frame_count;
}
{
assert(pos <= stop);
assert(pf < pf_end);
const Frame& prev = *pf++;
assert(prev.len == frame_size);
if (prev.len != frame_size)
return E_FILE_FORMAT_INVALID;
assert(pf < pf_end);
Frame& curr = *pf++;
assert(pf == pf_end);
curr.pos = 0; // patch later
const long long total_size = stop - pos;
if (total_size < size)
return E_FILE_FORMAT_INVALID;
frame_size = total_size - size;
if (frame_size > LONG_MAX)
return E_FILE_FORMAT_INVALID;
curr.len = static_cast<long>(frame_size);
}
pf = m_frames;
while (pf != pf_end) {
Frame& f = *pf++;
assert((pos + f.len) <= stop);
f.pos = pos;
pos += f.len;
}
assert(pos == stop);
}
return 0; // success
}
Commit Message: external/libvpx/libwebm: Update snapshot
Update libwebm snapshot. This update contains security fixes from upstream.
Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b
BUG=23167726
Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207
(cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a)
CWE ID: CWE-20 | long Block::Parse(const Cluster* pCluster) {
if (pCluster == NULL)
return -1;
if (pCluster->m_pSegment == NULL)
return -1;
assert(m_start >= 0);
assert(m_size >= 0);
assert(m_track <= 0);
assert(m_frames == NULL);
assert(m_frame_count <= 0);
long long pos = m_start;
const long long stop = m_start + m_size;
long len;
IMkvReader* const pReader = pCluster->m_pSegment->m_pReader;
m_track = ReadUInt(pReader, pos, len);
if (m_track <= 0)
return E_FILE_FORMAT_INVALID;
if ((pos + len) > stop)
return E_FILE_FORMAT_INVALID;
pos += len; // consume track number
if ((stop - pos) < 2)
return E_FILE_FORMAT_INVALID;
long status;
long long value;
status = UnserializeInt(pReader, pos, 2, value);
if (status)
return E_FILE_FORMAT_INVALID;
if (value < SHRT_MIN)
return E_FILE_FORMAT_INVALID;
if (value > SHRT_MAX)
return E_FILE_FORMAT_INVALID;
m_timecode = static_cast<short>(value);
pos += 2;
if ((stop - pos) <= 0)
return E_FILE_FORMAT_INVALID;
status = pReader->Read(pos, 1, &m_flags);
if (status)
return E_FILE_FORMAT_INVALID;
const int lacing = int(m_flags & 0x06) >> 1;
++pos; // consume flags byte
if (lacing == 0) { // no lacing
if (pos > stop)
return E_FILE_FORMAT_INVALID;
m_frame_count = 1;
m_frames = new (std::nothrow) Frame[m_frame_count];
if (m_frames == NULL)
return -1;
Frame& f = m_frames[0];
f.pos = pos;
const long long frame_size = stop - pos;
if (frame_size > LONG_MAX || frame_size <= 0)
return E_FILE_FORMAT_INVALID;
f.len = static_cast<long>(frame_size);
return 0; // success
}
if (pos >= stop)
return E_FILE_FORMAT_INVALID;
unsigned char biased_count;
status = pReader->Read(pos, 1, &biased_count);
if (status)
return E_FILE_FORMAT_INVALID;
++pos; // consume frame count
if (pos > stop)
return E_FILE_FORMAT_INVALID;
m_frame_count = int(biased_count) + 1;
m_frames = new (std::nothrow) Frame[m_frame_count];
if (m_frames == NULL)
return -1;
if (!m_frames)
return E_FILE_FORMAT_INVALID;
if (lacing == 1) { // Xiph
Frame* pf = m_frames;
Frame* const pf_end = pf + m_frame_count;
long long size = 0;
int frame_count = m_frame_count;
while (frame_count > 1) {
long frame_size = 0;
for (;;) {
unsigned char val;
if (pos >= stop)
return E_FILE_FORMAT_INVALID;
status = pReader->Read(pos, 1, &val);
if (status)
return E_FILE_FORMAT_INVALID;
++pos; // consume xiph size byte
frame_size += val;
if (val < 255)
break;
}
Frame& f = *pf++;
assert(pf < pf_end);
if (pf >= pf_end)
return E_FILE_FORMAT_INVALID;
f.pos = 0; // patch later
if (frame_size <= 0)
return E_FILE_FORMAT_INVALID;
f.len = frame_size;
size += frame_size; // contribution of this frame
--frame_count;
}
if (pf >= pf_end || pos > stop)
return E_FILE_FORMAT_INVALID;
{
Frame& f = *pf++;
if (pf != pf_end)
return E_FILE_FORMAT_INVALID;
f.pos = 0; // patch later
const long long total_size = stop - pos;
if (total_size < size)
return E_FILE_FORMAT_INVALID;
const long long frame_size = total_size - size;
if (frame_size > LONG_MAX || frame_size <= 0)
return E_FILE_FORMAT_INVALID;
f.len = static_cast<long>(frame_size);
}
pf = m_frames;
while (pf != pf_end) {
Frame& f = *pf++;
assert((pos + f.len) <= stop);
if ((pos + f.len) > stop)
return E_FILE_FORMAT_INVALID;
f.pos = pos;
pos += f.len;
}
assert(pos == stop);
if (pos != stop)
return E_FILE_FORMAT_INVALID;
} else if (lacing == 2) { // fixed-size lacing
if (pos >= stop)
return E_FILE_FORMAT_INVALID;
const long long total_size = stop - pos;
if ((total_size % m_frame_count) != 0)
return E_FILE_FORMAT_INVALID;
const long long frame_size = total_size / m_frame_count;
if (frame_size > LONG_MAX || frame_size <= 0)
return E_FILE_FORMAT_INVALID;
Frame* pf = m_frames;
Frame* const pf_end = pf + m_frame_count;
while (pf != pf_end) {
assert((pos + frame_size) <= stop);
if ((pos + frame_size) > stop)
return E_FILE_FORMAT_INVALID;
Frame& f = *pf++;
f.pos = pos;
f.len = static_cast<long>(frame_size);
pos += frame_size;
}
assert(pos == stop);
if (pos != stop)
return E_FILE_FORMAT_INVALID;
} else {
assert(lacing == 3); // EBML lacing
if (pos >= stop)
return E_FILE_FORMAT_INVALID;
long long size = 0;
int frame_count = m_frame_count;
long long frame_size = ReadUInt(pReader, pos, len);
if (frame_size <= 0)
return E_FILE_FORMAT_INVALID;
if (frame_size > LONG_MAX)
return E_FILE_FORMAT_INVALID;
if ((pos + len) > stop)
return E_FILE_FORMAT_INVALID;
pos += len; // consume length of size of first frame
if ((pos + frame_size) > stop)
return E_FILE_FORMAT_INVALID;
Frame* pf = m_frames;
Frame* const pf_end = pf + m_frame_count;
{
Frame& curr = *pf;
curr.pos = 0; // patch later
curr.len = static_cast<long>(frame_size);
size += curr.len; // contribution of this frame
}
--frame_count;
while (frame_count > 1) {
if (pos >= stop)
return E_FILE_FORMAT_INVALID;
assert(pf < pf_end);
if (pf >= pf_end)
return E_FILE_FORMAT_INVALID;
const Frame& prev = *pf++;
assert(prev.len == frame_size);
if (prev.len != frame_size)
return E_FILE_FORMAT_INVALID;
assert(pf < pf_end);
if (pf >= pf_end)
return E_FILE_FORMAT_INVALID;
Frame& curr = *pf;
curr.pos = 0; // patch later
const long long delta_size_ = ReadUInt(pReader, pos, len);
if (delta_size_ < 0)
return E_FILE_FORMAT_INVALID;
if ((pos + len) > stop)
return E_FILE_FORMAT_INVALID;
pos += len; // consume length of (delta) size
if (pos > stop)
return E_FILE_FORMAT_INVALID;
const int exp = 7 * len - 1;
const long long bias = (1LL << exp) - 1LL;
const long long delta_size = delta_size_ - bias;
frame_size += delta_size;
if (frame_size <= 0)
return E_FILE_FORMAT_INVALID;
if (frame_size > LONG_MAX)
return E_FILE_FORMAT_INVALID;
curr.len = static_cast<long>(frame_size);
size += curr.len; // contribution of this frame
--frame_count;
}
// parse last frame
if (frame_count > 0) {
if (pos > stop || pf >= pf_end)
return E_FILE_FORMAT_INVALID;
const Frame& prev = *pf++;
assert(prev.len == frame_size);
if (prev.len != frame_size)
return E_FILE_FORMAT_INVALID;
if (pf >= pf_end)
return E_FILE_FORMAT_INVALID;
Frame& curr = *pf++;
if (pf != pf_end)
return E_FILE_FORMAT_INVALID;
curr.pos = 0; // patch later
const long long total_size = stop - pos;
if (total_size < size)
return E_FILE_FORMAT_INVALID;
frame_size = total_size - size;
if (frame_size > LONG_MAX || frame_size <= 0)
return E_FILE_FORMAT_INVALID;
curr.len = static_cast<long>(frame_size);
}
pf = m_frames;
while (pf != pf_end) {
Frame& f = *pf++;
assert((pos + f.len) <= stop);
if ((pos + f.len) > stop)
return E_FILE_FORMAT_INVALID;
f.pos = pos;
pos += f.len;
}
if (pos != stop)
return E_FILE_FORMAT_INVALID;
}
return 0; // success
}
| 173,835 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool BluetoothDeviceChromeOS::RunPairingCallbacks(Status status) {
if (!agent_.get())
return false;
bool callback_run = false;
if (!pincode_callback_.is_null()) {
pincode_callback_.Run(status, "");
pincode_callback_.Reset();
callback_run = true;
}
if (!passkey_callback_.is_null()) {
passkey_callback_.Run(status, 0);
passkey_callback_.Reset();
callback_run = true;
}
if (!confirmation_callback_.is_null()) {
confirmation_callback_.Run(status);
confirmation_callback_.Reset();
callback_run = true;
}
return callback_run;
}
Commit Message: Refactor to support default Bluetooth pairing delegate
In order to support a default pairing delegate we need to move the agent
service provider delegate implementation from BluetoothDevice to
BluetoothAdapter while retaining the existing API.
BUG=338492
TEST=device_unittests, unit_tests, browser_tests
Review URL: https://codereview.chromium.org/148293003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@252216 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | bool BluetoothDeviceChromeOS::RunPairingCallbacks(Status status) {
| 171,238 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: midi_synth_load_patch(int dev, int format, const char __user *addr,
int offs, int count, int pmgr_flag)
{
int orig_dev = synth_devs[dev]->midi_dev;
struct sysex_info sysex;
int i;
unsigned long left, src_offs, eox_seen = 0;
int first_byte = 1;
int hdr_size = (unsigned long) &sysex.data[0] - (unsigned long) &sysex;
leave_sysex(dev);
if (!prefix_cmd(orig_dev, 0xf0))
return 0;
if (format != SYSEX_PATCH)
{
/* printk("MIDI Error: Invalid patch format (key) 0x%x\n", format);*/
return -EINVAL;
}
if (count < hdr_size)
{
/* printk("MIDI Error: Patch header too short\n");*/
return -EINVAL;
}
count -= hdr_size;
/*
* Copy the header from user space but ignore the first bytes which have
* been transferred already.
*/
if(copy_from_user(&((char *) &sysex)[offs], &(addr)[offs], hdr_size - offs))
return -EFAULT;
if (count < sysex.len)
{
/* printk(KERN_WARNING "MIDI Warning: Sysex record too short (%d<%d)\n", count, (int) sysex.len);*/
sysex.len = count;
}
left = sysex.len;
src_offs = 0;
for (i = 0; i < left && !signal_pending(current); i++)
{
unsigned char data;
if (get_user(data,
(unsigned char __user *)(addr + hdr_size + i)))
return -EFAULT;
eox_seen = (i > 0 && data & 0x80); /* End of sysex */
if (eox_seen && data != 0xf7)
data = 0xf7;
if (i == 0)
{
if (data != 0xf0)
{
printk(KERN_WARNING "midi_synth: Sysex start missing\n");
return -EINVAL;
}
}
while (!midi_devs[orig_dev]->outputc(orig_dev, (unsigned char) (data & 0xff)) &&
!signal_pending(current))
schedule();
if (!first_byte && data & 0x80)
return 0;
first_byte = 0;
}
if (!eox_seen)
midi_outc(orig_dev, 0xf7);
return 0;
}
Commit Message: sound/oss: remove offset from load_patch callbacks
Was: [PATCH] sound/oss/midi_synth: prevent underflow, use of
uninitialized value, and signedness issue
The offset passed to midi_synth_load_patch() can be essentially
arbitrary. If it's greater than the header length, this will result in
a copy_from_user(dst, src, negative_val). While this will just return
-EFAULT on x86, on other architectures this may cause memory corruption.
Additionally, the length field of the sysex_info structure may not be
initialized prior to its use. Finally, a signed comparison may result
in an unintentionally large loop.
On suggestion by Takashi Iwai, version two removes the offset argument
from the load_patch callbacks entirely, which also resolves similar
issues in opl3. Compile tested only.
v3 adjusts comments and hopefully gets copy offsets right.
Signed-off-by: Dan Rosenberg <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
CWE ID: CWE-189 | midi_synth_load_patch(int dev, int format, const char __user *addr,
int count, int pmgr_flag)
{
int orig_dev = synth_devs[dev]->midi_dev;
struct sysex_info sysex;
int i;
unsigned long left, src_offs, eox_seen = 0;
int first_byte = 1;
int hdr_size = (unsigned long) &sysex.data[0] - (unsigned long) &sysex;
leave_sysex(dev);
if (!prefix_cmd(orig_dev, 0xf0))
return 0;
/* Invalid patch format */
if (format != SYSEX_PATCH)
return -EINVAL;
/* Patch header too short */
if (count < hdr_size)
return -EINVAL;
count -= hdr_size;
/*
* Copy the header from user space
*/
if (copy_from_user(&sysex, addr, hdr_size))
return -EFAULT;
/* Sysex record too short */
if ((unsigned)count < (unsigned)sysex.len)
sysex.len = count;
left = sysex.len;
src_offs = 0;
for (i = 0; i < left && !signal_pending(current); i++)
{
unsigned char data;
if (get_user(data,
(unsigned char __user *)(addr + hdr_size + i)))
return -EFAULT;
eox_seen = (i > 0 && data & 0x80); /* End of sysex */
if (eox_seen && data != 0xf7)
data = 0xf7;
if (i == 0)
{
if (data != 0xf0)
{
printk(KERN_WARNING "midi_synth: Sysex start missing\n");
return -EINVAL;
}
}
while (!midi_devs[orig_dev]->outputc(orig_dev, (unsigned char) (data & 0xff)) &&
!signal_pending(current))
schedule();
if (!first_byte && data & 0x80)
return 0;
first_byte = 0;
}
if (!eox_seen)
midi_outc(orig_dev, 0xf7);
return 0;
}
| 165,892 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
return -EINVAL;
if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
return kvm_irqfd_deassign(kvm, args);
return kvm_irqfd_assign(kvm, args);
}
Commit Message: KVM: Don't accept obviously wrong gsi values via KVM_IRQFD
We cannot add routes for gsi values >= KVM_MAX_IRQ_ROUTES -- see
kvm_set_irq_routing(). Hence, there is no sense in accepting them
via KVM_IRQFD. Prevent them from entering the system in the first
place.
Signed-off-by: Jan H. Schönherr <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
CWE ID: CWE-20 | kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
return -EINVAL;
if (args->gsi >= KVM_MAX_IRQ_ROUTES)
return -EINVAL;
if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
return kvm_irqfd_deassign(kvm, args);
return kvm_irqfd_assign(kvm, args);
}
| 167,620 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: WORD32 ih264d_read_mmco_commands(struct _DecStruct * ps_dec)
{
dec_bit_stream_t *ps_bitstrm = ps_dec->ps_bitstrm;
dpb_commands_t *ps_dpb_cmds = ps_dec->ps_dpb_cmds;
dec_slice_params_t * ps_slice = ps_dec->ps_cur_slice;
WORD32 j;
UWORD8 u1_buf_mode;
struct MMCParams *ps_mmc_params;
UWORD32 *pu4_bitstrm_buf = ps_dec->ps_bitstrm->pu4_buffer;
UWORD32 *pu4_bitstrm_ofst = &ps_bitstrm->u4_ofst;
UWORD32 u4_bit_ofst = ps_dec->ps_bitstrm->u4_ofst;
ps_slice->u1_mmco_equalto5 = 0;
{
if(ps_dec->u1_nal_unit_type == IDR_SLICE_NAL)
{
ps_slice->u1_no_output_of_prior_pics_flag =
ih264d_get_bit_h264(ps_bitstrm);
COPYTHECONTEXT("SH: no_output_of_prior_pics_flag",
ps_slice->u1_no_output_of_prior_pics_flag);
ps_slice->u1_long_term_reference_flag = ih264d_get_bit_h264(
ps_bitstrm);
COPYTHECONTEXT("SH: long_term_reference_flag",
ps_slice->u1_long_term_reference_flag);
ps_dpb_cmds->u1_idr_pic = 1;
ps_dpb_cmds->u1_no_output_of_prior_pics_flag =
ps_slice->u1_no_output_of_prior_pics_flag;
ps_dpb_cmds->u1_long_term_reference_flag =
ps_slice->u1_long_term_reference_flag;
}
else
{
u1_buf_mode = ih264d_get_bit_h264(ps_bitstrm); //0 - sliding window; 1 - arbitrary
COPYTHECONTEXT("SH: adaptive_ref_pic_buffering_flag", u1_buf_mode);
ps_dpb_cmds->u1_buf_mode = u1_buf_mode;
j = 0;
if(u1_buf_mode == 1)
{
UWORD32 u4_mmco;
UWORD32 u4_diff_pic_num;
UWORD32 u4_lt_idx, u4_max_lt_idx;
u4_mmco = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
while(u4_mmco != END_OF_MMCO)
{
ps_mmc_params = &ps_dpb_cmds->as_mmc_params[j];
ps_mmc_params->u4_mmco = u4_mmco;
switch(u4_mmco)
{
case MARK_ST_PICNUM_AS_NONREF:
u4_diff_pic_num = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_diff_pic_num = u4_diff_pic_num;
break;
case MARK_LT_INDEX_AS_NONREF:
u4_lt_idx = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_lt_idx = u4_lt_idx;
break;
case MARK_ST_PICNUM_AS_LT_INDEX:
u4_diff_pic_num = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_diff_pic_num = u4_diff_pic_num;
u4_lt_idx = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_lt_idx = u4_lt_idx;
break;
case SET_MAX_LT_INDEX:
{
u4_max_lt_idx = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_max_lt_idx_plus1 = u4_max_lt_idx;
break;
}
case RESET_REF_PICTURES:
{
ps_slice->u1_mmco_equalto5 = 1;
break;
}
case SET_LT_INDEX:
u4_lt_idx = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_lt_idx = u4_lt_idx;
break;
default:
break;
}
u4_mmco = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
j++;
}
ps_dpb_cmds->u1_num_of_commands = j;
}
}
ps_dpb_cmds->u1_dpb_commands_read = 1;
ps_dpb_cmds->u1_dpb_commands_read_slc = 1;
}
u4_bit_ofst = ps_dec->ps_bitstrm->u4_ofst - u4_bit_ofst;
return u4_bit_ofst;
}
Commit Message: Return error when there are more mmco params than allocated size
Bug: 25818142
Change-Id: I5c1b23985eeca5192b42703c627ca3d060e4e13d
CWE ID: CWE-119 | WORD32 ih264d_read_mmco_commands(struct _DecStruct * ps_dec)
{
dec_bit_stream_t *ps_bitstrm = ps_dec->ps_bitstrm;
dpb_commands_t *ps_dpb_cmds = ps_dec->ps_dpb_cmds;
dec_slice_params_t * ps_slice = ps_dec->ps_cur_slice;
WORD32 j;
UWORD8 u1_buf_mode;
struct MMCParams *ps_mmc_params;
UWORD32 *pu4_bitstrm_buf = ps_dec->ps_bitstrm->pu4_buffer;
UWORD32 *pu4_bitstrm_ofst = &ps_bitstrm->u4_ofst;
UWORD32 u4_bit_ofst = ps_dec->ps_bitstrm->u4_ofst;
ps_slice->u1_mmco_equalto5 = 0;
{
if(ps_dec->u1_nal_unit_type == IDR_SLICE_NAL)
{
ps_slice->u1_no_output_of_prior_pics_flag =
ih264d_get_bit_h264(ps_bitstrm);
COPYTHECONTEXT("SH: no_output_of_prior_pics_flag",
ps_slice->u1_no_output_of_prior_pics_flag);
ps_slice->u1_long_term_reference_flag = ih264d_get_bit_h264(
ps_bitstrm);
COPYTHECONTEXT("SH: long_term_reference_flag",
ps_slice->u1_long_term_reference_flag);
ps_dpb_cmds->u1_idr_pic = 1;
ps_dpb_cmds->u1_no_output_of_prior_pics_flag =
ps_slice->u1_no_output_of_prior_pics_flag;
ps_dpb_cmds->u1_long_term_reference_flag =
ps_slice->u1_long_term_reference_flag;
}
else
{
u1_buf_mode = ih264d_get_bit_h264(ps_bitstrm); //0 - sliding window; 1 - arbitrary
COPYTHECONTEXT("SH: adaptive_ref_pic_buffering_flag", u1_buf_mode);
ps_dpb_cmds->u1_buf_mode = u1_buf_mode;
j = 0;
if(u1_buf_mode == 1)
{
UWORD32 u4_mmco;
UWORD32 u4_diff_pic_num;
UWORD32 u4_lt_idx, u4_max_lt_idx;
u4_mmco = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
while(u4_mmco != END_OF_MMCO)
{
if (j >= MAX_REF_BUFS)
{
ALOGE("b/25818142");
android_errorWriteLog(0x534e4554, "25818142");
ps_dpb_cmds->u1_num_of_commands = 0;
return -1;
}
ps_mmc_params = &ps_dpb_cmds->as_mmc_params[j];
ps_mmc_params->u4_mmco = u4_mmco;
switch(u4_mmco)
{
case MARK_ST_PICNUM_AS_NONREF:
u4_diff_pic_num = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_diff_pic_num = u4_diff_pic_num;
break;
case MARK_LT_INDEX_AS_NONREF:
u4_lt_idx = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_lt_idx = u4_lt_idx;
break;
case MARK_ST_PICNUM_AS_LT_INDEX:
u4_diff_pic_num = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_diff_pic_num = u4_diff_pic_num;
u4_lt_idx = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_lt_idx = u4_lt_idx;
break;
case SET_MAX_LT_INDEX:
{
u4_max_lt_idx = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_max_lt_idx_plus1 = u4_max_lt_idx;
break;
}
case RESET_REF_PICTURES:
{
ps_slice->u1_mmco_equalto5 = 1;
break;
}
case SET_LT_INDEX:
u4_lt_idx = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
ps_mmc_params->u4_lt_idx = u4_lt_idx;
break;
default:
break;
}
u4_mmco = ih264d_uev(pu4_bitstrm_ofst,
pu4_bitstrm_buf);
j++;
}
ps_dpb_cmds->u1_num_of_commands = j;
}
}
ps_dpb_cmds->u1_dpb_commands_read = 1;
ps_dpb_cmds->u1_dpb_commands_read_slc = 1;
}
u4_bit_ofst = ps_dec->ps_bitstrm->u4_ofst - u4_bit_ofst;
return u4_bit_ofst;
}
| 173,907 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void dns_resolver_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
if (key_is_instantiated(key)) {
int err = PTR_ERR(key->payload.data[dns_key_error]);
if (err)
seq_printf(m, ": %d", err);
else
seq_printf(m, ": %u", key->datalen);
}
}
Commit Message: KEYS: Fix race between updating and finding a negative key
Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection
error into one field such that:
(1) The instantiation state can be modified/read atomically.
(2) The error can be accessed atomically with the state.
(3) The error isn't stored unioned with the payload pointers.
This deals with the problem that the state is spread over three different
objects (two bits and a separate variable) and reading or updating them
atomically isn't practical, given that not only can uninstantiated keys
change into instantiated or rejected keys, but rejected keys can also turn
into instantiated keys - and someone accessing the key might not be using
any locking.
The main side effect of this problem is that what was held in the payload
may change, depending on the state. For instance, you might observe the
key to be in the rejected state. You then read the cached error, but if
the key semaphore wasn't locked, the key might've become instantiated
between the two reads - and you might now have something in hand that isn't
actually an error code.
The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error
code if the key is negatively instantiated. The key_is_instantiated()
function is replaced with key_is_positive() to avoid confusion as negative
keys are also 'instantiated'.
Additionally, barriering is included:
(1) Order payload-set before state-set during instantiation.
(2) Order state-read before payload-read when using the key.
Further separate barriering is necessary if RCU is being used to access the
payload content after reading the payload pointers.
Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data")
Cc: [email protected] # v4.4+
Reported-by: Eric Biggers <[email protected]>
Signed-off-by: David Howells <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
CWE ID: CWE-20 | static void dns_resolver_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
if (key_is_positive(key)) {
int err = PTR_ERR(key->payload.data[dns_key_error]);
if (err)
seq_printf(m, ": %d", err);
else
seq_printf(m, ": %u", key->datalen);
}
}
| 167,691 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void vrend_set_framebuffer_state(struct vrend_context *ctx,
uint32_t nr_cbufs, uint32_t surf_handle[8],
uint32_t zsurf_handle)
{
struct vrend_surface *surf, *zsurf;
int i;
int old_num;
GLenum status;
GLint new_height = -1;
bool new_ibf = false;
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);
if (zsurf_handle) {
zsurf = vrend_object_lookup(ctx->sub->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
if (!zsurf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, zsurf_handle);
return;
}
} else
zsurf = NULL;
if (ctx->sub->zsurf != zsurf) {
vrend_surface_reference(&ctx->sub->zsurf, zsurf);
vrend_hw_set_zsurf_texture(ctx);
}
old_num = ctx->sub->nr_cbufs;
ctx->sub->nr_cbufs = nr_cbufs;
ctx->sub->old_nr_cbufs = old_num;
for (i = 0; i < nr_cbufs; i++) {
if (surf_handle[i] != 0) {
surf = vrend_object_lookup(ctx->sub->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
if (!surf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, surf_handle[i]);
return;
}
} else
surf = NULL;
if (ctx->sub->surf[i] != surf) {
vrend_surface_reference(&ctx->sub->surf[i], surf);
vrend_hw_set_color_surface(ctx, i);
}
}
if (old_num > ctx->sub->nr_cbufs) {
for (i = ctx->sub->nr_cbufs; i < old_num; i++) {
vrend_surface_reference(&ctx->sub->surf[i], NULL);
vrend_hw_set_color_surface(ctx, i);
}
}
/* find a buffer to set fb_height from */
if (ctx->sub->nr_cbufs == 0 && !ctx->sub->zsurf) {
new_height = 0;
new_ibf = false;
} else if (ctx->sub->nr_cbufs == 0) {
new_height = u_minify(ctx->sub->zsurf->texture->base.height0, ctx->sub->zsurf->val0);
new_ibf = ctx->sub->zsurf->texture->y_0_top ? true : false;
}
else {
surf = NULL;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (ctx->sub->surf[i]) {
surf = ctx->sub->surf[i];
break;
}
}
if (surf == NULL) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, i);
return;
}
new_height = u_minify(surf->texture->base.height0, surf->val0);
new_ibf = surf->texture->y_0_top ? true : false;
}
if (new_height != -1) {
if (ctx->sub->fb_height != new_height || ctx->sub->inverted_fbo_content != new_ibf) {
ctx->sub->fb_height = new_height;
ctx->sub->inverted_fbo_content = new_ibf;
ctx->sub->scissor_state_dirty = (1 << 0);
ctx->sub->viewport_state_dirty = (1 << 0);
}
}
vrend_hw_emit_framebuffer_state(ctx);
if (ctx->sub->nr_cbufs > 0 || ctx->sub->zsurf) {
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
fprintf(stderr,"failed to complete framebuffer 0x%x %s\n", status, ctx->debug_name);
}
ctx->sub->shader_dirty = true;
}
Commit Message:
CWE ID: CWE-476 | void vrend_set_framebuffer_state(struct vrend_context *ctx,
uint32_t nr_cbufs, uint32_t surf_handle[PIPE_MAX_COLOR_BUFS],
uint32_t zsurf_handle)
{
struct vrend_surface *surf, *zsurf;
int i;
int old_num;
GLenum status;
GLint new_height = -1;
bool new_ibf = false;
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);
if (zsurf_handle) {
zsurf = vrend_object_lookup(ctx->sub->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
if (!zsurf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, zsurf_handle);
return;
}
} else
zsurf = NULL;
if (ctx->sub->zsurf != zsurf) {
vrend_surface_reference(&ctx->sub->zsurf, zsurf);
vrend_hw_set_zsurf_texture(ctx);
}
old_num = ctx->sub->nr_cbufs;
ctx->sub->nr_cbufs = nr_cbufs;
ctx->sub->old_nr_cbufs = old_num;
for (i = 0; i < nr_cbufs; i++) {
if (surf_handle[i] != 0) {
surf = vrend_object_lookup(ctx->sub->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
if (!surf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, surf_handle[i]);
return;
}
} else
surf = NULL;
if (ctx->sub->surf[i] != surf) {
vrend_surface_reference(&ctx->sub->surf[i], surf);
vrend_hw_set_color_surface(ctx, i);
}
}
if (old_num > ctx->sub->nr_cbufs) {
for (i = ctx->sub->nr_cbufs; i < old_num; i++) {
vrend_surface_reference(&ctx->sub->surf[i], NULL);
vrend_hw_set_color_surface(ctx, i);
}
}
/* find a buffer to set fb_height from */
if (ctx->sub->nr_cbufs == 0 && !ctx->sub->zsurf) {
new_height = 0;
new_ibf = false;
} else if (ctx->sub->nr_cbufs == 0) {
new_height = u_minify(ctx->sub->zsurf->texture->base.height0, ctx->sub->zsurf->val0);
new_ibf = ctx->sub->zsurf->texture->y_0_top ? true : false;
}
else {
surf = NULL;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (ctx->sub->surf[i]) {
surf = ctx->sub->surf[i];
break;
}
}
if (surf == NULL) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, i);
return;
}
new_height = u_minify(surf->texture->base.height0, surf->val0);
new_ibf = surf->texture->y_0_top ? true : false;
}
if (new_height != -1) {
if (ctx->sub->fb_height != new_height || ctx->sub->inverted_fbo_content != new_ibf) {
ctx->sub->fb_height = new_height;
ctx->sub->inverted_fbo_content = new_ibf;
ctx->sub->scissor_state_dirty = (1 << 0);
ctx->sub->viewport_state_dirty = (1 << 0);
}
}
vrend_hw_emit_framebuffer_state(ctx);
if (ctx->sub->nr_cbufs > 0 || ctx->sub->zsurf) {
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
fprintf(stderr,"failed to complete framebuffer 0x%x %s\n", status, ctx->debug_name);
}
ctx->sub->shader_dirty = true;
}
| 164,959 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: main( int argc,
char* argv[] )
{
int old_ptsize, orig_ptsize, file;
int first_glyph = 0;
int XisSetup = 0;
char* execname;
int option;
int file_loaded;
grEvent event;
execname = ft_basename( argv[0] );
while ( 1 )
{
option = getopt( argc, argv, "d:e:f:r:" );
if ( option == -1 )
break;
switch ( option )
{
case 'd':
parse_design_coords( optarg );
break;
case 'e':
encoding = (FT_Encoding)make_tag( optarg );
break;
case 'f':
first_glyph = atoi( optarg );
break;
case 'r':
res = atoi( optarg );
if ( res < 1 )
usage( execname );
break;
default:
usage( execname );
break;
}
}
argc -= optind;
argv += optind;
if ( argc <= 1 )
usage( execname );
if ( sscanf( argv[0], "%d", &orig_ptsize ) != 1 )
orig_ptsize = 64;
file = 1;
/* Initialize engine */
error = FT_Init_FreeType( &library );
if ( error )
PanicZ( "Could not initialize FreeType library" );
NewFile:
ptsize = orig_ptsize;
hinted = 1;
file_loaded = 0;
/* Load face */
error = FT_New_Face( library, argv[file], 0, &face );
if ( error )
goto Display_Font;
if ( encoding != FT_ENCODING_NONE )
{
error = FT_Select_Charmap( face, encoding );
if ( error )
goto Display_Font;
}
/* retrieve multiple master information */
error = FT_Get_MM_Var( face, &multimaster );
if ( error )
goto Display_Font;
/* if the user specified a position, use it, otherwise */
/* set the current position to the median of each axis */
{
int n;
for ( n = 0; n < (int)multimaster->num_axis; n++ )
{
design_pos[n] = n < requested_cnt ? requested_pos[n]
: multimaster->axis[n].def;
if ( design_pos[n] < multimaster->axis[n].minimum )
design_pos[n] = multimaster->axis[n].minimum;
else if ( design_pos[n] > multimaster->axis[n].maximum )
design_pos[n] = multimaster->axis[n].maximum;
}
}
error = FT_Set_Var_Design_Coordinates( face,
multimaster->num_axis,
design_pos );
if ( error )
goto Display_Font;
file_loaded++;
Reset_Scale( ptsize );
num_glyphs = face->num_glyphs;
glyph = face->glyph;
size = face->size;
Display_Font:
/* initialize graphics if needed */
if ( !XisSetup )
{
XisSetup = 1;
Init_Display();
}
grSetTitle( surface, "FreeType Glyph Viewer - press F1 for help" );
old_ptsize = ptsize;
if ( file_loaded >= 1 )
{
Fail = 0;
Num = first_glyph;
if ( Num >= num_glyphs )
Num = num_glyphs - 1;
if ( Num < 0 )
Num = 0;
}
for ( ;; )
{
int key;
Clear_Display();
if ( file_loaded >= 1 )
{
switch ( render_mode )
{
case 0:
Render_Text( Num );
break;
default:
Render_All( Num, ptsize );
}
sprintf( Header, "%s %s (file %s)",
face->family_name,
face->style_name,
ft_basename( argv[file] ) );
if ( !new_header )
new_header = Header;
grWriteCellString( &bit, 0, 0, new_header, fore_color );
new_header = 0;
sprintf( Header, "axis: " );
{
int n;
for ( n = 0; n < (int)multimaster->num_axis; n++ )
{
char temp[32];
sprintf( temp, " %s:%g",
multimaster->axis[n].name,
design_pos[n]/65536. );
strcat( Header, temp );
}
}
grWriteCellString( &bit, 0, 16, Header, fore_color );
sprintf( Header, "at %d points, first glyph = %d",
ptsize,
Num );
}
else
{
sprintf( Header, "%s: not an MM font file, or could not be opened",
ft_basename( argv[file] ) );
}
grWriteCellString( &bit, 0, 8, Header, fore_color );
grRefreshSurface( surface );
grListenSurface( surface, 0, &event );
if ( !( key = Process_Event( &event ) ) )
goto End;
if ( key == 'n' )
{
if ( file_loaded >= 1 )
FT_Done_Face( face );
if ( file < argc - 1 )
file++;
goto NewFile;
}
if ( key == 'p' )
{
if ( file_loaded >= 1 )
FT_Done_Face( face );
if ( file > 1 )
file--;
goto NewFile;
}
if ( ptsize != old_ptsize )
{
Reset_Scale( ptsize );
old_ptsize = ptsize;
}
}
End:
grDoneSurface( surface );
grDoneDevices();
free ( multimaster );
FT_Done_Face ( face );
FT_Done_FreeType( library );
printf( "Execution completed successfully.\n" );
printf( "Fails = %d\n", Fail );
exit( 0 ); /* for safety reasons */
return 0; /* never reached */
}
Commit Message:
CWE ID: CWE-119 | main( int argc,
char* argv[] )
{
int old_ptsize, orig_ptsize, file;
int first_glyph = 0;
int XisSetup = 0;
char* execname;
int option;
int file_loaded;
grEvent event;
execname = ft_basename( argv[0] );
while ( 1 )
{
option = getopt( argc, argv, "d:e:f:r:" );
if ( option == -1 )
break;
switch ( option )
{
case 'd':
parse_design_coords( optarg );
break;
case 'e':
encoding = (FT_Encoding)make_tag( optarg );
break;
case 'f':
first_glyph = atoi( optarg );
break;
case 'r':
res = atoi( optarg );
if ( res < 1 )
usage( execname );
break;
default:
usage( execname );
break;
}
}
argc -= optind;
argv += optind;
if ( argc <= 1 )
usage( execname );
if ( sscanf( argv[0], "%d", &orig_ptsize ) != 1 )
orig_ptsize = 64;
file = 1;
/* Initialize engine */
error = FT_Init_FreeType( &library );
if ( error )
PanicZ( "Could not initialize FreeType library" );
NewFile:
ptsize = orig_ptsize;
hinted = 1;
file_loaded = 0;
/* Load face */
error = FT_New_Face( library, argv[file], 0, &face );
if ( error )
goto Display_Font;
if ( encoding != FT_ENCODING_NONE )
{
error = FT_Select_Charmap( face, encoding );
if ( error )
goto Display_Font;
}
/* retrieve multiple master information */
error = FT_Get_MM_Var( face, &multimaster );
if ( error )
goto Display_Font;
/* if the user specified a position, use it, otherwise */
/* set the current position to the median of each axis */
{
int n;
for ( n = 0; n < (int)multimaster->num_axis; n++ )
{
design_pos[n] = n < requested_cnt ? requested_pos[n]
: multimaster->axis[n].def;
if ( design_pos[n] < multimaster->axis[n].minimum )
design_pos[n] = multimaster->axis[n].minimum;
else if ( design_pos[n] > multimaster->axis[n].maximum )
design_pos[n] = multimaster->axis[n].maximum;
}
}
error = FT_Set_Var_Design_Coordinates( face,
multimaster->num_axis,
design_pos );
if ( error )
goto Display_Font;
file_loaded++;
Reset_Scale( ptsize );
num_glyphs = face->num_glyphs;
glyph = face->glyph;
size = face->size;
Display_Font:
/* initialize graphics if needed */
if ( !XisSetup )
{
XisSetup = 1;
Init_Display();
}
grSetTitle( surface, "FreeType Glyph Viewer - press F1 for help" );
old_ptsize = ptsize;
if ( file_loaded >= 1 )
{
Fail = 0;
Num = first_glyph;
if ( Num >= num_glyphs )
Num = num_glyphs - 1;
if ( Num < 0 )
Num = 0;
}
for ( ;; )
{
int key;
Clear_Display();
if ( file_loaded >= 1 )
{
switch ( render_mode )
{
case 0:
Render_Text( Num );
break;
default:
Render_All( Num, ptsize );
}
sprintf( Header, "%.50s %.50s (file %.100s)",
face->family_name,
face->style_name,
ft_basename( argv[file] ) );
if ( !new_header )
new_header = Header;
grWriteCellString( &bit, 0, 0, new_header, fore_color );
new_header = 0;
sprintf( Header, "axis: " );
{
int n;
for ( n = 0; n < (int)multimaster->num_axis; n++ )
{
char temp[32];
sprintf( temp, " %s:%g",
multimaster->axis[n].name,
design_pos[n]/65536. );
strcat( Header, temp );
}
}
grWriteCellString( &bit, 0, 16, Header, fore_color );
sprintf( Header, "at %d points, first glyph = %d",
ptsize,
Num );
}
else
{
sprintf( Header, "%.100s: not an MM font file, or could not be opened",
ft_basename( argv[file] ) );
}
grWriteCellString( &bit, 0, 8, Header, fore_color );
grRefreshSurface( surface );
grListenSurface( surface, 0, &event );
if ( !( key = Process_Event( &event ) ) )
goto End;
if ( key == 'n' )
{
if ( file_loaded >= 1 )
FT_Done_Face( face );
if ( file < argc - 1 )
file++;
goto NewFile;
}
if ( key == 'p' )
{
if ( file_loaded >= 1 )
FT_Done_Face( face );
if ( file > 1 )
file--;
goto NewFile;
}
if ( ptsize != old_ptsize )
{
Reset_Scale( ptsize );
old_ptsize = ptsize;
}
}
End:
grDoneSurface( surface );
grDoneDevices();
free ( multimaster );
FT_Done_Face ( face );
FT_Done_FreeType( library );
printf( "Execution completed successfully.\n" );
printf( "Fails = %d\n", Fail );
exit( 0 ); /* for safety reasons */
return 0; /* never reached */
}
| 164,999 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void MojoVideoEncodeAcceleratorService::UseOutputBitstreamBuffer(
int32_t bitstream_buffer_id,
mojo::ScopedSharedBufferHandle buffer) {
DVLOG(2) << __func__ << " bitstream_buffer_id=" << bitstream_buffer_id;
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (!encoder_)
return;
if (!buffer.is_valid()) {
DLOG(ERROR) << __func__ << " invalid |buffer|.";
NotifyError(::media::VideoEncodeAccelerator::kInvalidArgumentError);
return;
}
if (bitstream_buffer_id < 0) {
DLOG(ERROR) << __func__ << " bitstream_buffer_id=" << bitstream_buffer_id
<< " must be >= 0";
NotifyError(::media::VideoEncodeAccelerator::kInvalidArgumentError);
return;
}
base::SharedMemoryHandle handle;
size_t memory_size = 0;
bool read_only = false;
auto result = mojo::UnwrapSharedMemoryHandle(std::move(buffer), &handle,
&memory_size, &read_only);
if (result != MOJO_RESULT_OK || memory_size == 0u) {
DLOG(ERROR) << __func__ << " mojo::UnwrapSharedMemoryHandle() failed";
NotifyError(::media::VideoEncodeAccelerator::kPlatformFailureError);
return;
}
if (memory_size < output_buffer_size_) {
DLOG(ERROR) << __func__ << " bitstream_buffer_id=" << bitstream_buffer_id
<< " has a size of " << memory_size
<< "B, different from expected " << output_buffer_size_ << "B";
NotifyError(::media::VideoEncodeAccelerator::kInvalidArgumentError);
return;
}
encoder_->UseOutputBitstreamBuffer(
BitstreamBuffer(bitstream_buffer_id, handle, memory_size));
}
Commit Message: Correct mojo::WrapSharedMemoryHandle usage
Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which
were assuming that the call actually has any control over the memory
protection applied to a handle when mapped.
Where fixing usage is infeasible for this CL, TODOs are added to
annotate follow-up work.
Also updates the API and documentation to (hopefully) improve clarity
and avoid similar mistakes from being made in the future.
BUG=792900
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477
Reviewed-on: https://chromium-review.googlesource.com/818282
Reviewed-by: Wei Li <[email protected]>
Reviewed-by: Lei Zhang <[email protected]>
Reviewed-by: John Abd-El-Malek <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Sadrul Chowdhury <[email protected]>
Reviewed-by: Yuzhu Shen <[email protected]>
Reviewed-by: Robert Sesek <[email protected]>
Commit-Queue: Ken Rockot <[email protected]>
Cr-Commit-Position: refs/heads/master@{#530268}
CWE ID: CWE-787 | void MojoVideoEncodeAcceleratorService::UseOutputBitstreamBuffer(
int32_t bitstream_buffer_id,
mojo::ScopedSharedBufferHandle buffer) {
DVLOG(2) << __func__ << " bitstream_buffer_id=" << bitstream_buffer_id;
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (!encoder_)
return;
if (!buffer.is_valid()) {
DLOG(ERROR) << __func__ << " invalid |buffer|.";
NotifyError(::media::VideoEncodeAccelerator::kInvalidArgumentError);
return;
}
if (bitstream_buffer_id < 0) {
DLOG(ERROR) << __func__ << " bitstream_buffer_id=" << bitstream_buffer_id
<< " must be >= 0";
NotifyError(::media::VideoEncodeAccelerator::kInvalidArgumentError);
return;
}
base::SharedMemoryHandle handle;
size_t memory_size = 0;
auto result = mojo::UnwrapSharedMemoryHandle(std::move(buffer), &handle,
&memory_size, nullptr);
if (result != MOJO_RESULT_OK || memory_size == 0u) {
DLOG(ERROR) << __func__ << " mojo::UnwrapSharedMemoryHandle() failed";
NotifyError(::media::VideoEncodeAccelerator::kPlatformFailureError);
return;
}
if (memory_size < output_buffer_size_) {
DLOG(ERROR) << __func__ << " bitstream_buffer_id=" << bitstream_buffer_id
<< " has a size of " << memory_size
<< "B, different from expected " << output_buffer_size_ << "B";
NotifyError(::media::VideoEncodeAccelerator::kInvalidArgumentError);
return;
}
encoder_->UseOutputBitstreamBuffer(
BitstreamBuffer(bitstream_buffer_id, handle, memory_size));
}
| 172,881 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void pdf_summarize(
FILE *fp,
const pdf_t *pdf,
const char *name,
pdf_flag_t flags)
{
int i, j, page, n_versions, n_entries;
FILE *dst, *out;
char *dst_name, *c;
dst = NULL;
dst_name = NULL;
if (name)
{
dst_name = malloc(strlen(name) * 2 + 16);
sprintf(dst_name, "%s/%s", name, name);
if ((c = strrchr(dst_name, '.')) && (strncmp(c, ".pdf", 4) == 0))
*c = '\0';
strcat(dst_name, ".summary");
if (!(dst = fopen(dst_name, "w")))
{
ERR("Could not open file '%s' for writing\n", dst_name);
return;
}
}
/* Send output to file or stdout */
out = (dst) ? dst : stdout;
/* Count versions */
n_versions = pdf->n_xrefs;
if (n_versions && pdf->xrefs[0].is_linear)
--n_versions;
/* Ignore bad xref entry */
for (i=1; i<pdf->n_xrefs; ++i)
if (pdf->xrefs[i].end == 0)
--n_versions;
/* If we have no valid versions but linear, count that */
if (!pdf->n_xrefs || (!n_versions && pdf->xrefs[0].is_linear))
n_versions = 1;
/* Compare each object (if we dont have xref streams) */
n_entries = 0;
for (i=0; !(const int)pdf->has_xref_streams && i<pdf->n_xrefs; i++)
{
if (flags & PDF_FLAG_QUIET)
continue;
for (j=0; j<pdf->xrefs[i].n_entries; j++)
{
++n_entries;
fprintf(out,
"%s: --%c-- Version %d -- Object %d (%s)",
pdf->name,
pdf_get_object_status(pdf, i, j),
pdf->xrefs[i].version,
pdf->xrefs[i].entries[j].obj_id,
get_type(fp, pdf->xrefs[i].entries[j].obj_id,
&pdf->xrefs[i]));
/* TODO
page = get_page(pdf->xrefs[i].entries[j].obj_id, &pdf->xrefs[i]);
*/
if (0 /*page*/)
fprintf(out, " Page(%d)\n", page);
else
fprintf(out, "\n");
}
}
/* Trailing summary */
if (!(flags & PDF_FLAG_QUIET))
{
/* Let the user know that we cannot we print a per-object summary.
* If we have a 1.5 PDF using streams for xref, we have not objects
* to display, so let the user know whats up.
*/
if (pdf->has_xref_streams || !n_entries)
fprintf(out,
"%s: This PDF contains potential cross reference streams.\n"
"%s: An object summary is not available.\n",
pdf->name,
pdf->name);
fprintf(out,
"---------- %s ----------\n"
"Versions: %d\n",
pdf->name,
n_versions);
/* Count entries for summary */
if (!pdf->has_xref_streams)
for (i=0; i<pdf->n_xrefs; i++)
{
if (pdf->xrefs[i].is_linear)
continue;
n_entries = pdf->xrefs[i].n_entries;
/* If we are a linearized PDF, all versions are made from those
* objects too. So count em'
*/
if (pdf->xrefs[0].is_linear)
n_entries += pdf->xrefs[0].n_entries;
if (pdf->xrefs[i].version && n_entries)
fprintf(out,
"Version %d -- %d objects\n",
pdf->xrefs[i].version,
n_entries);
}
}
else /* Quiet output */
fprintf(out, "%s: %d\n", pdf->name, n_versions);
if (dst)
{
fclose(dst);
free(dst_name);
}
}
Commit Message: Zero and sanity check all dynamic allocs.
This addresses the memory issues in Issue #6 expressed in
calloc_some.pdf and malloc_some.pdf
CWE ID: CWE-787 | void pdf_summarize(
FILE *fp,
const pdf_t *pdf,
const char *name,
pdf_flag_t flags)
{
int i, j, page, n_versions, n_entries;
FILE *dst, *out;
char *dst_name, *c;
dst = NULL;
dst_name = NULL;
if (name)
{
dst_name = safe_calloc(strlen(name) * 2 + 16);
sprintf(dst_name, "%s/%s", name, name);
if ((c = strrchr(dst_name, '.')) && (strncmp(c, ".pdf", 4) == 0))
*c = '\0';
strcat(dst_name, ".summary");
if (!(dst = fopen(dst_name, "w")))
{
ERR("Could not open file '%s' for writing\n", dst_name);
return;
}
}
/* Send output to file or stdout */
out = (dst) ? dst : stdout;
/* Count versions */
n_versions = pdf->n_xrefs;
if (n_versions && pdf->xrefs[0].is_linear)
--n_versions;
/* Ignore bad xref entry */
for (i=1; i<pdf->n_xrefs; ++i)
if (pdf->xrefs[i].end == 0)
--n_versions;
/* If we have no valid versions but linear, count that */
if (!pdf->n_xrefs || (!n_versions && pdf->xrefs[0].is_linear))
n_versions = 1;
/* Compare each object (if we dont have xref streams) */
n_entries = 0;
for (i=0; !(const int)pdf->has_xref_streams && i<pdf->n_xrefs; i++)
{
if (flags & PDF_FLAG_QUIET)
continue;
for (j=0; j<pdf->xrefs[i].n_entries; j++)
{
++n_entries;
fprintf(out,
"%s: --%c-- Version %d -- Object %d (%s)",
pdf->name,
pdf_get_object_status(pdf, i, j),
pdf->xrefs[i].version,
pdf->xrefs[i].entries[j].obj_id,
get_type(fp, pdf->xrefs[i].entries[j].obj_id,
&pdf->xrefs[i]));
/* TODO
page = get_page(pdf->xrefs[i].entries[j].obj_id, &pdf->xrefs[i]);
*/
if (0 /*page*/)
fprintf(out, " Page(%d)\n", page);
else
fprintf(out, "\n");
}
}
/* Trailing summary */
if (!(flags & PDF_FLAG_QUIET))
{
/* Let the user know that we cannot print a per-object summary.
* If we have a 1.5 PDF using streams for xref, we have no objects
* to display, so let the user know what's up.
*/
if (pdf->has_xref_streams || !n_entries)
fprintf(out,
"%s: This PDF contains potential cross reference streams.\n"
"%s: An object summary is not available.\n",
pdf->name,
pdf->name);
fprintf(out,
"---------- %s ----------\n"
"Versions: %d\n",
pdf->name,
n_versions);
/* Count entries for summary */
if (!pdf->has_xref_streams)
for (i=0; i<pdf->n_xrefs; i++)
{
if (pdf->xrefs[i].is_linear)
continue;
n_entries = pdf->xrefs[i].n_entries;
/* If we are a linearized PDF, all versions are made from those
* objects too. So count em'
*/
if (pdf->xrefs[0].is_linear)
n_entries += pdf->xrefs[0].n_entries;
if (pdf->xrefs[i].version && n_entries)
fprintf(out,
"Version %d -- %d objects\n",
pdf->xrefs[i].version,
n_entries);
}
}
else /* Quiet output */
fprintf(out, "%s: %d\n", pdf->name, n_versions);
if (dst)
{
fclose(dst);
free(dst_name);
}
}
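The fixed function above relies on a safe_calloc() helper that, per the commit message, zeroes and sanity-checks every dynamic allocation. A minimal sketch of what such a wrapper could look like follows; the project's real helper may differ in name, size limit, and error handling.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: a zeroing, size-checked allocator in the spirit of the
 * safe_calloc() call above.  The 1 GiB cap and the exit-on-failure policy are
 * assumptions, not the project's actual behaviour.
 */
void *safe_calloc(size_t bytes)
{
    void *ptr;

    if (bytes == 0 || bytes > (1UL << 30))
    {
        fprintf(stderr, "safe_calloc: rejecting request of %zu bytes\n", bytes);
        exit(EXIT_FAILURE);
    }

    ptr = calloc(1, bytes); /* calloc() zero-fills, so later string ops never read junk */
    if (ptr == NULL)
    {
        fprintf(stderr, "safe_calloc: out of memory (%zu bytes)\n", bytes);
        exit(EXIT_FAILURE);
    }

    return ptr;
}

Used as in the fixed code, dst_name = safe_calloc(strlen(name) * 2 + 16) then yields a zero-initialized buffer large enough for the sprintf/strcat sequence that follows.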
| 169,574 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: t42_parse_sfnts( T42_Face face,
T42_Loader loader )
{
T42_Parser parser = &loader->parser;
FT_Memory memory = parser->root.memory;
FT_Byte* cur;
FT_Byte* limit = parser->root.limit;
FT_Error error;
FT_Int num_tables = 0;
FT_ULong count, ttf_size = 0;
FT_Long n, string_size, old_string_size, real_size;
FT_Byte* string_buf = NULL;
FT_Bool allocated = 0;
T42_Load_Status status;
/* The format is */
/* */
/* /sfnts [ <hexstring> <hexstring> ... ] def */
/* */
/* or */
/* */
/* /sfnts [ */
/* <num_bin_bytes> RD <binary data> */
/* <num_bin_bytes> RD <binary data> */
/* ... */
/* ] def */
/* */
/* with exactly one space after the `RD' token. */
T1_Skip_Spaces( parser );
if ( parser->root.cursor >= limit || *parser->root.cursor++ != '[' )
{
FT_ERROR(( "t42_parse_sfnts: can't find begin of sfnts vector\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
T1_Skip_Spaces( parser );
status = BEFORE_START;
string_size = 0;
old_string_size = 0;
count = 0;
while ( parser->root.cursor < limit )
{
cur = parser->root.cursor;
if ( *cur == ']' )
{
parser->root.cursor++;
goto Exit;
}
else if ( *cur == '<' )
{
T1_Skip_PS_Token( parser );
if ( parser->root.error )
goto Exit;
/* don't include delimiters */
string_size = (FT_Long)( ( parser->root.cursor - cur - 2 + 1 ) / 2 );
if ( FT_REALLOC( string_buf, old_string_size, string_size ) )
goto Fail;
allocated = 1;
parser->root.cursor = cur;
(void)T1_ToBytes( parser, string_buf, string_size, &real_size, 1 );
old_string_size = string_size;
string_size = real_size;
}
else if ( ft_isdigit( *cur ) )
{
if ( allocated )
{
FT_ERROR(( "t42_parse_sfnts: "
"can't handle mixed binary and hex strings\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
string_size = T1_ToInt( parser );
if ( string_size < 0 )
{
FT_ERROR(( "t42_parse_sfnts: invalid string size\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
T1_Skip_PS_Token( parser ); /* `RD' */
if ( parser->root.error )
return;
string_buf = parser->root.cursor + 1; /* one space after `RD' */
if ( limit - parser->root.cursor < string_size )
{
FT_ERROR(( "t42_parse_sfnts: too many binary data\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
else
parser->root.cursor += string_size + 1;
}
if ( !string_buf )
{
FT_ERROR(( "t42_parse_sfnts: invalid data in sfnts array\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
/* A string can have a trailing zero (odd) byte for padding. */
/* Ignore it. */
if ( ( string_size & 1 ) && string_buf[string_size - 1] == 0 )
string_size--;
if ( !string_size )
{
FT_ERROR(( "t42_parse_sfnts: invalid string\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
for ( n = 0; n < string_size; n++ )
{
switch ( status )
{
case BEFORE_START:
/* load offset table, 12 bytes */
if ( count < 12 )
{
face->ttf_data[count++] = string_buf[n];
continue;
}
else
{
num_tables = 16 * face->ttf_data[4] + face->ttf_data[5];
status = BEFORE_TABLE_DIR;
ttf_size = 12 + 16 * num_tables;
if ( FT_REALLOC( face->ttf_data, 12, ttf_size ) )
goto Fail;
}
/* fall through */
case BEFORE_TABLE_DIR:
/* the offset table is read; read the table directory */
if ( count < ttf_size )
{
face->ttf_data[count++] = string_buf[n];
continue;
}
else
{
int i;
FT_ULong len;
for ( i = 0; i < num_tables; i++ )
{
FT_Byte* p = face->ttf_data + 12 + 16 * i + 12;
len = FT_PEEK_ULONG( p );
/* Pad to a 4-byte boundary length */
ttf_size += ( len + 3 ) & ~3;
}
status = OTHER_TABLES;
face->ttf_size = ttf_size;
/* there are no more than 256 tables, so no size check here */
if ( FT_REALLOC( face->ttf_data, 12 + 16 * num_tables,
ttf_size + 1 ) )
goto Fail;
}
/* fall through */
case OTHER_TABLES:
/* all other tables are just copied */
if ( count >= ttf_size )
{
FT_ERROR(( "t42_parse_sfnts: too many binary data\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
}
face->ttf_data[count++] = string_buf[n];
}
}
T1_Skip_Spaces( parser );
}
Commit Message:
CWE ID: | t42_parse_sfnts( T42_Face face,
T42_Loader loader )
{
T42_Parser parser = &loader->parser;
FT_Memory memory = parser->root.memory;
FT_Byte* cur;
FT_Byte* limit = parser->root.limit;
FT_Error error;
FT_Int num_tables = 0;
FT_ULong count;
FT_Long n, string_size, old_string_size, real_size;
FT_Byte* string_buf = NULL;
FT_Bool allocated = 0;
T42_Load_Status status;
/* The format is */
/* */
/* /sfnts [ <hexstring> <hexstring> ... ] def */
/* */
/* or */
/* */
/* /sfnts [ */
/* <num_bin_bytes> RD <binary data> */
/* <num_bin_bytes> RD <binary data> */
/* ... */
/* ] def */
/* */
/* with exactly one space after the `RD' token. */
T1_Skip_Spaces( parser );
if ( parser->root.cursor >= limit || *parser->root.cursor++ != '[' )
{
FT_ERROR(( "t42_parse_sfnts: can't find begin of sfnts vector\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
T1_Skip_Spaces( parser );
status = BEFORE_START;
string_size = 0;
old_string_size = 0;
count = 0;
while ( parser->root.cursor < limit )
{
cur = parser->root.cursor;
if ( *cur == ']' )
{
parser->root.cursor++;
goto Exit;
}
else if ( *cur == '<' )
{
T1_Skip_PS_Token( parser );
if ( parser->root.error )
goto Exit;
/* don't include delimiters */
string_size = (FT_Long)( ( parser->root.cursor - cur - 2 + 1 ) / 2 );
if ( FT_REALLOC( string_buf, old_string_size, string_size ) )
goto Fail;
allocated = 1;
parser->root.cursor = cur;
(void)T1_ToBytes( parser, string_buf, string_size, &real_size, 1 );
old_string_size = string_size;
string_size = real_size;
}
else if ( ft_isdigit( *cur ) )
{
if ( allocated )
{
FT_ERROR(( "t42_parse_sfnts: "
"can't handle mixed binary and hex strings\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
string_size = T1_ToInt( parser );
if ( string_size < 0 )
{
FT_ERROR(( "t42_parse_sfnts: invalid string size\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
T1_Skip_PS_Token( parser ); /* `RD' */
if ( parser->root.error )
return;
string_buf = parser->root.cursor + 1; /* one space after `RD' */
if ( limit - parser->root.cursor < string_size )
{
FT_ERROR(( "t42_parse_sfnts: too much binary data\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
else
parser->root.cursor += string_size + 1;
}
if ( !string_buf )
{
FT_ERROR(( "t42_parse_sfnts: invalid data in sfnts array\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
/* A string can have a trailing zero (odd) byte for padding. */
/* Ignore it. */
if ( ( string_size & 1 ) && string_buf[string_size - 1] == 0 )
string_size--;
if ( !string_size )
{
FT_ERROR(( "t42_parse_sfnts: invalid string\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
for ( n = 0; n < string_size; n++ )
{
switch ( status )
{
case BEFORE_START:
/* load offset table, 12 bytes */
if ( count < 12 )
{
face->ttf_data[count++] = string_buf[n];
continue;
}
else
{
num_tables = 16 * face->ttf_data[4] + face->ttf_data[5];
status = BEFORE_TABLE_DIR;
face->ttf_size = 12 + 16 * num_tables;
if ( FT_REALLOC( face->ttf_data, 12, face->ttf_size ) )
goto Fail;
}
/* fall through */
case BEFORE_TABLE_DIR:
/* the offset table is read; read the table directory */
if ( count < face->ttf_size )
{
face->ttf_data[count++] = string_buf[n];
continue;
}
else
{
int i;
FT_ULong len;
for ( i = 0; i < num_tables; i++ )
{
FT_Byte* p = face->ttf_data + 12 + 16 * i + 12;
len = FT_PEEK_ULONG( p );
/* Pad to a 4-byte boundary length */
face->ttf_size += ( len + 3 ) & ~3;
}
status = OTHER_TABLES;
/* there are no more than 256 tables, so no size check here */
if ( FT_REALLOC( face->ttf_data, 12 + 16 * num_tables,
face->ttf_size + 1 ) )
goto Fail;
}
/* fall through */
case OTHER_TABLES:
/* all other tables are just copied */
if ( count >= face->ttf_size )
{
FT_ERROR(( "t42_parse_sfnts: too much binary data\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
}
face->ttf_data[count++] = string_buf[n];
}
}
T1_Skip_Spaces( parser );
}
| 164,862 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void GM2TabStyle::PaintInactiveTabBackground(gfx::Canvas* canvas,
const SkPath& clip) const {
bool has_custom_image;
int fill_id = tab_->controller()->GetBackgroundResourceId(&has_custom_image);
if (!has_custom_image)
fill_id = 0;
PaintTabBackground(canvas, false /* active */, fill_id, 0,
tab_->controller()->MaySetClip() ? &clip : nullptr);
}
Commit Message: Paint tab groups with the group color.
* The background of TabGroupHeader now uses the group color.
* The backgrounds of tabs in the group are tinted with the group color.
This treatment, along with the colors chosen, are intended to be
a placeholder.
Bug: 905491
Change-Id: Ic808548f8eba23064606e7fb8c9bba281d0d117f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1610504
Commit-Queue: Bret Sepulveda <[email protected]>
Reviewed-by: Taylor Bergquist <[email protected]>
Cr-Commit-Position: refs/heads/master@{#660498}
CWE ID: CWE-20 | void GM2TabStyle::PaintInactiveTabBackground(gfx::Canvas* canvas,
const SkPath& clip) const {
bool has_custom_image;
int fill_id = tab_->controller()->GetBackgroundResourceId(&has_custom_image);
if (!has_custom_image)
fill_id = 0;
PaintTabBackground(canvas, TAB_INACTIVE, fill_id, 0,
tab_->controller()->MaySetClip() ? &clip : nullptr);
}
| 172,523 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: size_t jsvGetString(const JsVar *v, char *str, size_t len) {
assert(len>0);
const char *s = jsvGetConstString(v);
if (s) {
/* don't use strncpy here because we don't
* want to pad the entire buffer with zeros */
len--;
int l = 0;
while (*s && l<len) {
str[l] = s[l];
l++;
}
str[l] = 0;
return l;
} else if (jsvIsInt(v)) {
itostr(v->varData.integer, str, 10);
return strlen(str);
} else if (jsvIsFloat(v)) {
ftoa_bounded(v->varData.floating, str, len);
return strlen(str);
} else if (jsvHasCharacterData(v)) {
assert(!jsvIsStringExt(v));
size_t l = len;
JsvStringIterator it;
jsvStringIteratorNewConst(&it, v, 0);
while (jsvStringIteratorHasChar(&it)) {
if (l--<=1) {
*str = 0;
jsvStringIteratorFree(&it);
return len;
}
*(str++) = jsvStringIteratorGetChar(&it);
jsvStringIteratorNext(&it);
}
jsvStringIteratorFree(&it);
*str = 0;
return len-l;
} else {
JsVar *stringVar = jsvAsString((JsVar*)v, false); // we know we're casting to non-const here
if (stringVar) {
size_t l = jsvGetString(stringVar, str, len); // call again - but this time with converted var
jsvUnLock(stringVar);
return l;
} else {
str[0] = 0;
jsExceptionHere(JSET_INTERNALERROR, "Variable type cannot be converted to string");
return 0;
}
}
}
Commit Message: fix jsvGetString regression
CWE ID: CWE-119 | size_t jsvGetString(const JsVar *v, char *str, size_t len) {
assert(len>0);
const char *s = jsvGetConstString(v);
if (s) {
/* don't use strncpy here because we don't
* want to pad the entire buffer with zeros */
len--;
int l = 0;
while (s[l] && l<len) {
str[l] = s[l];
l++;
}
str[l] = 0;
return l;
} else if (jsvIsInt(v)) {
itostr(v->varData.integer, str, 10);
return strlen(str);
} else if (jsvIsFloat(v)) {
ftoa_bounded(v->varData.floating, str, len);
return strlen(str);
} else if (jsvHasCharacterData(v)) {
assert(!jsvIsStringExt(v));
size_t l = len;
JsvStringIterator it;
jsvStringIteratorNewConst(&it, v, 0);
while (jsvStringIteratorHasChar(&it)) {
if (l--<=1) {
*str = 0;
jsvStringIteratorFree(&it);
return len;
}
*(str++) = jsvStringIteratorGetChar(&it);
jsvStringIteratorNext(&it);
}
jsvStringIteratorFree(&it);
*str = 0;
return len-l;
} else {
JsVar *stringVar = jsvAsString((JsVar*)v, false); // we know we're casting to non-const here
if (stringVar) {
size_t l = jsvGetString(stringVar, str, len); // call again - but this time with converted var
jsvUnLock(stringVar);
return l;
} else {
str[0] = 0;
jsExceptionHere(JSET_INTERNALERROR, "Variable type cannot be converted to string");
return 0;
}
}
}
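The regression fix above changes the copy loop's guard from *s, which only ever tests the first byte of the constant string, to s[l], so copying stops at the source's terminator instead of running until the destination is almost full. A small standalone illustration of the two guards (made-up helper names, not Espruino code):

#include <stdio.h>
#include <string.h>

/* copy_bad() mirrors the vulnerable guard: *s never changes, so once the first
 * byte is non-zero the loop keeps copying until the destination is nearly full,
 * reading far past the logical end of the source.  copy_ok() mirrors the fixed
 * guard and stops at the source's NUL byte.
 */
size_t copy_bad(const char *s, char *dst, size_t len)
{
    size_t l = 0;
    while (*s && l < len - 1) { dst[l] = s[l]; l++; }   /* guard never advances */
    dst[l] = 0;
    return l;
}

size_t copy_ok(const char *s, char *dst, size_t len)
{
    size_t l = 0;
    while (s[l] && l < len - 1) { dst[l] = s[l]; l++; } /* stops at s's terminator */
    dst[l] = 0;
    return l;
}

int main(void)
{
    char src[64] = "on";  /* zero-padded here so the over-read stays in bounds */
    char dst[32];
    printf("bad guard copied %zu bytes\n", copy_bad(src, dst, sizeof(dst)));  /* 31 */
    printf("fixed guard copied %zu bytes\n", copy_ok(src, dst, sizeof(dst))); /* 2 */
    return 0;
}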
| 169,210 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int snd_usbmidi_create(struct snd_card *card,
struct usb_interface *iface,
struct list_head *midi_list,
const struct snd_usb_audio_quirk *quirk)
{
struct snd_usb_midi *umidi;
struct snd_usb_midi_endpoint_info endpoints[MIDI_MAX_ENDPOINTS];
int out_ports, in_ports;
int i, err;
umidi = kzalloc(sizeof(*umidi), GFP_KERNEL);
if (!umidi)
return -ENOMEM;
umidi->dev = interface_to_usbdev(iface);
umidi->card = card;
umidi->iface = iface;
umidi->quirk = quirk;
umidi->usb_protocol_ops = &snd_usbmidi_standard_ops;
spin_lock_init(&umidi->disc_lock);
init_rwsem(&umidi->disc_rwsem);
mutex_init(&umidi->mutex);
umidi->usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor),
le16_to_cpu(umidi->dev->descriptor.idProduct));
setup_timer(&umidi->error_timer, snd_usbmidi_error_timer,
(unsigned long)umidi);
/* detect the endpoint(s) to use */
memset(endpoints, 0, sizeof(endpoints));
switch (quirk ? quirk->type : QUIRK_MIDI_STANDARD_INTERFACE) {
case QUIRK_MIDI_STANDARD_INTERFACE:
err = snd_usbmidi_get_ms_info(umidi, endpoints);
if (umidi->usb_id == USB_ID(0x0763, 0x0150)) /* M-Audio Uno */
umidi->usb_protocol_ops =
&snd_usbmidi_maudio_broken_running_status_ops;
break;
case QUIRK_MIDI_US122L:
umidi->usb_protocol_ops = &snd_usbmidi_122l_ops;
/* fall through */
case QUIRK_MIDI_FIXED_ENDPOINT:
memcpy(&endpoints[0], quirk->data,
sizeof(struct snd_usb_midi_endpoint_info));
err = snd_usbmidi_detect_endpoints(umidi, &endpoints[0], 1);
break;
case QUIRK_MIDI_YAMAHA:
err = snd_usbmidi_detect_yamaha(umidi, &endpoints[0]);
break;
case QUIRK_MIDI_ROLAND:
err = snd_usbmidi_detect_roland(umidi, &endpoints[0]);
break;
case QUIRK_MIDI_MIDIMAN:
umidi->usb_protocol_ops = &snd_usbmidi_midiman_ops;
memcpy(&endpoints[0], quirk->data,
sizeof(struct snd_usb_midi_endpoint_info));
err = 0;
break;
case QUIRK_MIDI_NOVATION:
umidi->usb_protocol_ops = &snd_usbmidi_novation_ops;
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
break;
case QUIRK_MIDI_RAW_BYTES:
umidi->usb_protocol_ops = &snd_usbmidi_raw_ops;
/*
* Interface 1 contains isochronous endpoints, but with the same
* numbers as in interface 0. Since it is interface 1 that the
* USB core has most recently seen, these descriptors are now
* associated with the endpoint numbers. This will foul up our
* attempts to submit bulk/interrupt URBs to the endpoints in
* interface 0, so we have to make sure that the USB core looks
* again at interface 0 by calling usb_set_interface() on it.
*/
if (umidi->usb_id == USB_ID(0x07fd, 0x0001)) /* MOTU Fastlane */
usb_set_interface(umidi->dev, 0, 0);
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
break;
case QUIRK_MIDI_EMAGIC:
umidi->usb_protocol_ops = &snd_usbmidi_emagic_ops;
memcpy(&endpoints[0], quirk->data,
sizeof(struct snd_usb_midi_endpoint_info));
err = snd_usbmidi_detect_endpoints(umidi, &endpoints[0], 1);
break;
case QUIRK_MIDI_CME:
umidi->usb_protocol_ops = &snd_usbmidi_cme_ops;
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
break;
case QUIRK_MIDI_AKAI:
umidi->usb_protocol_ops = &snd_usbmidi_akai_ops;
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
/* endpoint 1 is input-only */
endpoints[1].out_cables = 0;
break;
case QUIRK_MIDI_FTDI:
umidi->usb_protocol_ops = &snd_usbmidi_ftdi_ops;
/* set baud rate to 31250 (48 MHz / 16 / 96) */
err = usb_control_msg(umidi->dev, usb_sndctrlpipe(umidi->dev, 0),
3, 0x40, 0x60, 0, NULL, 0, 1000);
if (err < 0)
break;
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
break;
case QUIRK_MIDI_CH345:
umidi->usb_protocol_ops = &snd_usbmidi_ch345_broken_sysex_ops;
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
break;
default:
dev_err(&umidi->dev->dev, "invalid quirk type %d\n",
quirk->type);
err = -ENXIO;
break;
}
if (err < 0) {
kfree(umidi);
return err;
}
/* create rawmidi device */
out_ports = 0;
in_ports = 0;
for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
out_ports += hweight16(endpoints[i].out_cables);
in_ports += hweight16(endpoints[i].in_cables);
}
err = snd_usbmidi_create_rawmidi(umidi, out_ports, in_ports);
if (err < 0) {
kfree(umidi);
return err;
}
/* create endpoint/port structures */
if (quirk && quirk->type == QUIRK_MIDI_MIDIMAN)
err = snd_usbmidi_create_endpoints_midiman(umidi, &endpoints[0]);
else
err = snd_usbmidi_create_endpoints(umidi, endpoints);
if (err < 0) {
snd_usbmidi_free(umidi);
return err;
}
usb_autopm_get_interface_no_resume(umidi->iface);
list_add_tail(&umidi->list, midi_list);
return 0;
}
Commit Message: ALSA: usb-audio: avoid freeing umidi object twice
The 'umidi' object will be free'd on the error path by snd_usbmidi_free()
when tearing down the rawmidi interface. So we shouldn't try to free it
in snd_usbmidi_create() after having registered the rawmidi interface.
Found by KASAN.
Signed-off-by: Andrey Konovalov <[email protected]>
Acked-by: Clemens Ladisch <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
CWE ID: | int snd_usbmidi_create(struct snd_card *card,
struct usb_interface *iface,
struct list_head *midi_list,
const struct snd_usb_audio_quirk *quirk)
{
struct snd_usb_midi *umidi;
struct snd_usb_midi_endpoint_info endpoints[MIDI_MAX_ENDPOINTS];
int out_ports, in_ports;
int i, err;
umidi = kzalloc(sizeof(*umidi), GFP_KERNEL);
if (!umidi)
return -ENOMEM;
umidi->dev = interface_to_usbdev(iface);
umidi->card = card;
umidi->iface = iface;
umidi->quirk = quirk;
umidi->usb_protocol_ops = &snd_usbmidi_standard_ops;
spin_lock_init(&umidi->disc_lock);
init_rwsem(&umidi->disc_rwsem);
mutex_init(&umidi->mutex);
umidi->usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor),
le16_to_cpu(umidi->dev->descriptor.idProduct));
setup_timer(&umidi->error_timer, snd_usbmidi_error_timer,
(unsigned long)umidi);
/* detect the endpoint(s) to use */
memset(endpoints, 0, sizeof(endpoints));
switch (quirk ? quirk->type : QUIRK_MIDI_STANDARD_INTERFACE) {
case QUIRK_MIDI_STANDARD_INTERFACE:
err = snd_usbmidi_get_ms_info(umidi, endpoints);
if (umidi->usb_id == USB_ID(0x0763, 0x0150)) /* M-Audio Uno */
umidi->usb_protocol_ops =
&snd_usbmidi_maudio_broken_running_status_ops;
break;
case QUIRK_MIDI_US122L:
umidi->usb_protocol_ops = &snd_usbmidi_122l_ops;
/* fall through */
case QUIRK_MIDI_FIXED_ENDPOINT:
memcpy(&endpoints[0], quirk->data,
sizeof(struct snd_usb_midi_endpoint_info));
err = snd_usbmidi_detect_endpoints(umidi, &endpoints[0], 1);
break;
case QUIRK_MIDI_YAMAHA:
err = snd_usbmidi_detect_yamaha(umidi, &endpoints[0]);
break;
case QUIRK_MIDI_ROLAND:
err = snd_usbmidi_detect_roland(umidi, &endpoints[0]);
break;
case QUIRK_MIDI_MIDIMAN:
umidi->usb_protocol_ops = &snd_usbmidi_midiman_ops;
memcpy(&endpoints[0], quirk->data,
sizeof(struct snd_usb_midi_endpoint_info));
err = 0;
break;
case QUIRK_MIDI_NOVATION:
umidi->usb_protocol_ops = &snd_usbmidi_novation_ops;
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
break;
case QUIRK_MIDI_RAW_BYTES:
umidi->usb_protocol_ops = &snd_usbmidi_raw_ops;
/*
* Interface 1 contains isochronous endpoints, but with the same
* numbers as in interface 0. Since it is interface 1 that the
* USB core has most recently seen, these descriptors are now
* associated with the endpoint numbers. This will foul up our
* attempts to submit bulk/interrupt URBs to the endpoints in
* interface 0, so we have to make sure that the USB core looks
* again at interface 0 by calling usb_set_interface() on it.
*/
if (umidi->usb_id == USB_ID(0x07fd, 0x0001)) /* MOTU Fastlane */
usb_set_interface(umidi->dev, 0, 0);
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
break;
case QUIRK_MIDI_EMAGIC:
umidi->usb_protocol_ops = &snd_usbmidi_emagic_ops;
memcpy(&endpoints[0], quirk->data,
sizeof(struct snd_usb_midi_endpoint_info));
err = snd_usbmidi_detect_endpoints(umidi, &endpoints[0], 1);
break;
case QUIRK_MIDI_CME:
umidi->usb_protocol_ops = &snd_usbmidi_cme_ops;
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
break;
case QUIRK_MIDI_AKAI:
umidi->usb_protocol_ops = &snd_usbmidi_akai_ops;
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
/* endpoint 1 is input-only */
endpoints[1].out_cables = 0;
break;
case QUIRK_MIDI_FTDI:
umidi->usb_protocol_ops = &snd_usbmidi_ftdi_ops;
/* set baud rate to 31250 (48 MHz / 16 / 96) */
err = usb_control_msg(umidi->dev, usb_sndctrlpipe(umidi->dev, 0),
3, 0x40, 0x60, 0, NULL, 0, 1000);
if (err < 0)
break;
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
break;
case QUIRK_MIDI_CH345:
umidi->usb_protocol_ops = &snd_usbmidi_ch345_broken_sysex_ops;
err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
break;
default:
dev_err(&umidi->dev->dev, "invalid quirk type %d\n",
quirk->type);
err = -ENXIO;
break;
}
if (err < 0) {
kfree(umidi);
return err;
}
/* create rawmidi device */
out_ports = 0;
in_ports = 0;
for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
out_ports += hweight16(endpoints[i].out_cables);
in_ports += hweight16(endpoints[i].in_cables);
}
err = snd_usbmidi_create_rawmidi(umidi, out_ports, in_ports);
if (err < 0) {
kfree(umidi);
return err;
}
/* create endpoint/port structures */
if (quirk && quirk->type == QUIRK_MIDI_MIDIMAN)
err = snd_usbmidi_create_endpoints_midiman(umidi, &endpoints[0]);
else
err = snd_usbmidi_create_endpoints(umidi, endpoints);
if (err < 0) {
return err;
}
usb_autopm_get_interface_no_resume(umidi->iface);
list_add_tail(&umidi->list, midi_list);
return 0;
}
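The fix above removes the snd_usbmidi_free(umidi) call on the endpoint-creation error path: once snd_usbmidi_create_rawmidi() has registered the rawmidi device, tearing that device down already frees umidi, so freeing it in the creator as well is the double free described in the commit message. A generic, non-kernel sketch of that ownership rule (all names invented for illustration):

#include <stdlib.h>

/* Illustrative only: once a context has been handed to a subsystem whose
 * teardown path frees it, later error paths in the creating function must not
 * free it again -- they just report the error and let the owner clean up.
 */
struct ctx { int registered; };

void subsystem_teardown(struct ctx *c)
{
    free(c); /* the subsystem owns c after registration */
}

int create_ctx(struct ctx **out)
{
    struct ctx *c = calloc(1, sizeof(*c));
    if (c == NULL)
        return -1; /* nothing registered yet, caller just sees plain failure */

    c->registered = 1; /* ...registration with the owning subsystem happens here... */

    if (0 /* imagine a later setup step failing */)
    {
        /* Do NOT free(c) here: the subsystem's teardown will free it when the
         * registered object is destroyed, and a second free would reproduce
         * the bug fixed above. */
        return -1;
    }

    *out = c;
    return 0;
}

int main(void)
{
    struct ctx *c = NULL;
    if (create_ctx(&c) == 0)
        subsystem_teardown(c);
    return 0;
}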
| 167,412 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: get_principal_2_svc(gprinc_arg *arg, struct svc_req *rqstp)
{
static gprinc_ret ret;
char *prime_arg, *funcname;
gss_buffer_desc client_name,
service_name;
OM_uint32 minor_stat;
kadm5_server_handle_t handle;
const char *errmsg = NULL;
xdr_free(xdr_gprinc_ret, &ret);
if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle)))
goto exit_func;
if ((ret.code = check_handle((void *)handle)))
goto exit_func;
ret.api_version = handle->api_version;
funcname = "kadm5_get_principal";
if (setup_gss_names(rqstp, &client_name, &service_name) < 0) {
ret.code = KADM5_FAILURE;
goto exit_func;
}
if (krb5_unparse_name(handle->context, arg->princ, &prime_arg)) {
ret.code = KADM5_BAD_PRINCIPAL;
goto exit_func;
}
if (! cmp_gss_krb5_name(handle, rqst2name(rqstp), arg->princ) &&
(CHANGEPW_SERVICE(rqstp) || !kadm5int_acl_check(handle->context,
rqst2name(rqstp),
ACL_INQUIRE,
arg->princ,
NULL))) {
ret.code = KADM5_AUTH_GET;
log_unauth(funcname, prime_arg,
&client_name, &service_name, rqstp);
} else {
ret.code = kadm5_get_principal(handle, arg->princ, &ret.rec,
arg->mask);
if( ret.code != 0 )
errmsg = krb5_get_error_message(handle->context, ret.code);
log_done(funcname, prime_arg, errmsg,
&client_name, &service_name, rqstp);
if (errmsg != NULL)
krb5_free_error_message(handle->context, errmsg);
}
free(prime_arg);
gss_release_buffer(&minor_stat, &client_name);
gss_release_buffer(&minor_stat, &service_name);
exit_func:
free_server_handle(handle);
return &ret;
}
Commit Message: Fix leaks in kadmin server stubs [CVE-2015-8631]
In each kadmind server stub, initialize the client_name and
server_name variables, and release them in the cleanup handler. Many
of the stubs will otherwise leak the client and server name if
krb5_unparse_name() fails. Also make sure to free the prime_arg
variables in rename_principal_2_svc(), or we can leak the first one if
unparsing the second one fails. Discovered by Simo Sorce.
CVE-2015-8631:
In all versions of MIT krb5, an authenticated attacker can cause
kadmind to leak memory by supplying a null principal name in a request
which uses one. Repeating these requests will eventually cause
kadmind to exhaust all available memory.
CVSSv2 Vector: AV:N/AC:L/Au:S/C:N/I:N/A:C/E:POC/RL:OF/RC:C
ticket: 8343 (new)
target_version: 1.14-next
target_version: 1.13-next
tags: pullup
CWE ID: CWE-119 | get_principal_2_svc(gprinc_arg *arg, struct svc_req *rqstp)
{
static gprinc_ret ret;
char *prime_arg, *funcname;
gss_buffer_desc client_name = GSS_C_EMPTY_BUFFER;
gss_buffer_desc service_name = GSS_C_EMPTY_BUFFER;
OM_uint32 minor_stat;
kadm5_server_handle_t handle;
const char *errmsg = NULL;
xdr_free(xdr_gprinc_ret, &ret);
if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle)))
goto exit_func;
if ((ret.code = check_handle((void *)handle)))
goto exit_func;
ret.api_version = handle->api_version;
funcname = "kadm5_get_principal";
if (setup_gss_names(rqstp, &client_name, &service_name) < 0) {
ret.code = KADM5_FAILURE;
goto exit_func;
}
if (krb5_unparse_name(handle->context, arg->princ, &prime_arg)) {
ret.code = KADM5_BAD_PRINCIPAL;
goto exit_func;
}
if (! cmp_gss_krb5_name(handle, rqst2name(rqstp), arg->princ) &&
(CHANGEPW_SERVICE(rqstp) || !kadm5int_acl_check(handle->context,
rqst2name(rqstp),
ACL_INQUIRE,
arg->princ,
NULL))) {
ret.code = KADM5_AUTH_GET;
log_unauth(funcname, prime_arg,
&client_name, &service_name, rqstp);
} else {
ret.code = kadm5_get_principal(handle, arg->princ, &ret.rec,
arg->mask);
if( ret.code != 0 )
errmsg = krb5_get_error_message(handle->context, ret.code);
log_done(funcname, prime_arg, errmsg,
&client_name, &service_name, rqstp);
if (errmsg != NULL)
krb5_free_error_message(handle->context, errmsg);
}
free(prime_arg);
exit_func:
gss_release_buffer(&minor_stat, &client_name);
gss_release_buffer(&minor_stat, &service_name);
free_server_handle(handle);
return &ret;
}
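The CVE-2015-8631 fix above initializes both GSS name buffers to GSS_C_EMPTY_BUFFER at declaration and releases them after the exit_func label, so every early goto still frees whatever was allocated and never touches an uninitialized handle. A simplified sketch of that init-at-declaration, single-exit cleanup pattern (plain C stand-ins, not the GSSAPI types):

#include <stdlib.h>
#include <string.h>

/* Illustrative only: resources start in a known-empty state and are released
 * after one exit label, so early "goto out" paths can neither leak them nor
 * free uninitialized memory.
 */
struct buffer { char *data; };
#define EMPTY_BUFFER { NULL }

void buffer_release(struct buffer *b)
{
    free(b->data);
    b->data = NULL;
}

int handle_request(int fail_early)
{
    int ret = -1;
    struct buffer client  = EMPTY_BUFFER;  /* safe to release even if never filled */
    struct buffer service = EMPTY_BUFFER;

    if (fail_early)
        goto out;                          /* early exit still reaches the cleanup */

    client.data  = strdup("client@EXAMPLE.COM");
    service.data = strdup("kadmin/admin@EXAMPLE.COM");
    if (client.data == NULL || service.data == NULL)
        goto out;

    ret = 0;                               /* ...the real request handling would go here... */

out:
    buffer_release(&client);
    buffer_release(&service);
    return ret;
}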
| 167,515 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: SProcXFixesCreatePointerBarrier(ClientPtr client)
{
REQUEST(xXFixesCreatePointerBarrierReq);
int i;
CARD16 *in_devices = (CARD16 *) &stuff[1];
swaps(&stuff->length);
swaps(&stuff->num_devices);
REQUEST_FIXED_SIZE(xXFixesCreatePointerBarrierReq, pad_to_int32(stuff->num_devices));
swaps(&stuff->x1);
swaps(&stuff->y1);
swaps(&stuff->x2);
swaps(&stuff->y2);
swapl(&stuff->directions);
for (i = 0; i < stuff->num_devices; i++) {
swaps(in_devices + i);
}
return ProcXFixesVector[stuff->xfixesReqType] (client);
}
Commit Message:
CWE ID: CWE-20 | SProcXFixesCreatePointerBarrier(ClientPtr client)
{
REQUEST(xXFixesCreatePointerBarrierReq);
int i;
CARD16 *in_devices = (CARD16 *) &stuff[1];
REQUEST_AT_LEAST_SIZE(xXFixesCreatePointerBarrierReq);
swaps(&stuff->length);
swaps(&stuff->num_devices);
REQUEST_FIXED_SIZE(xXFixesCreatePointerBarrierReq, pad_to_int32(stuff->num_devices));
swaps(&stuff->x1);
swaps(&stuff->y1);
swaps(&stuff->x2);
swaps(&stuff->y2);
swapl(&stuff->directions);
for (i = 0; i < stuff->num_devices; i++) {
swaps(in_devices + i);
}
return ProcXFixesVector[stuff->xfixesReqType] (client);
}
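No commit message is recorded for this entry, but the visible change adds REQUEST_AT_LEAST_SIZE() before num_devices is byte-swapped and used to compute the request's expected size, i.e. the handler now proves the buffer holds at least the fixed-size header before trusting any length field inside it. A generic sketch of that two-step length check (invented layout and names, not the X server macros):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative only: first make sure the fixed header fits, then read the
 * count field and make sure the variable-length tail it implies also fits.
 * The 4-byte header with the count at offset 2 is an assumption made up for
 * this sketch.
 */
int check_request(const uint8_t *buf, size_t buflen)
{
    uint16_t num_devices;
    size_t need;

    if (buflen < 4)                      /* "at least size": the fixed header must fit */
        return -1;

    memcpy(&num_devices, buf + 2, sizeof(num_devices));

    need = 4 + (size_t)num_devices * sizeof(uint16_t);
    need = (need + 3) & ~(size_t)3;      /* pad to the protocol's 4-byte unit */

    if (buflen < need)                   /* "fixed size": the trailing device list must fit */
        return -1;

    return 0;
}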
| 165,440 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void FragmentPaintPropertyTreeBuilder::UpdateOverflowClip() {
DCHECK(properties_);
if (NeedsPaintPropertyUpdate()) {
if (NeedsOverflowClip(object_) && !CanOmitOverflowClip(object_)) {
ClipPaintPropertyNode::State state;
state.local_transform_space = context_.current.transform;
if (!RuntimeEnabledFeatures::SlimmingPaintV175Enabled() &&
object_.IsSVGForeignObject()) {
state.clip_rect = ToClipRect(ToLayoutBox(object_).FrameRect());
} else if (object_.IsBox()) {
state.clip_rect = ToClipRect(ToLayoutBox(object_).OverflowClipRect(
context_.current.paint_offset));
state.clip_rect_excluding_overlay_scrollbars =
ToClipRect(ToLayoutBox(object_).OverflowClipRect(
context_.current.paint_offset,
kExcludeOverlayScrollbarSizeForHitTesting));
} else {
DCHECK(object_.IsSVGViewportContainer());
const auto& viewport_container = ToLayoutSVGViewportContainer(object_);
state.clip_rect = FloatRoundedRect(
viewport_container.LocalToSVGParentTransform().Inverse().MapRect(
viewport_container.Viewport()));
}
const ClipPaintPropertyNode* existing = properties_->OverflowClip();
bool equal_ignoring_hit_test_rects =
!!existing &&
existing->EqualIgnoringHitTestRects(context_.current.clip, state);
OnUpdateClip(properties_->UpdateOverflowClip(context_.current.clip,
std::move(state)),
equal_ignoring_hit_test_rects);
} else {
OnClearClip(properties_->ClearOverflowClip());
}
}
if (auto* overflow_clip = properties_->OverflowClip())
context_.current.clip = overflow_clip;
}
Commit Message: Reland "[CI] Make paint property nodes non-ref-counted"
This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7.
Reason for revert: Retry in M69.
Original change's description:
> Revert "[CI] Make paint property nodes non-ref-counted"
>
> This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123.
>
> Reason for revert: Caused bugs found by clusterfuzz
>
> Original change's description:
> > [CI] Make paint property nodes non-ref-counted
> >
> > Now all paint property nodes are owned by ObjectPaintProperties
> > (and LocalFrameView temporarily before removing non-RLS mode).
> > Others just use raw pointers or references.
> >
> > Bug: 833496
> > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae
> > Reviewed-on: https://chromium-review.googlesource.com/1031101
> > Reviewed-by: Tien-Ren Chen <[email protected]>
> > Commit-Queue: Xianzhu Wang <[email protected]>
> > Cr-Commit-Position: refs/heads/master@{#554626}
>
> [email protected],[email protected],[email protected]
>
> Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: 833496,837932,837943
> Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> Reviewed-on: https://chromium-review.googlesource.com/1034292
> Reviewed-by: Xianzhu Wang <[email protected]>
> Commit-Queue: Xianzhu Wang <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#554653}
[email protected],[email protected],[email protected]
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: 833496, 837932, 837943
Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
Reviewed-on: https://chromium-review.googlesource.com/1083491
Commit-Queue: Xianzhu Wang <[email protected]>
Reviewed-by: Xianzhu Wang <[email protected]>
Cr-Commit-Position: refs/heads/master@{#563930}
CWE ID: | void FragmentPaintPropertyTreeBuilder::UpdateOverflowClip() {
DCHECK(properties_);
if (NeedsPaintPropertyUpdate()) {
if (NeedsOverflowClip(object_) && !CanOmitOverflowClip(object_)) {
ClipPaintPropertyNode::State state;
state.local_transform_space = context_.current.transform;
if (!RuntimeEnabledFeatures::SlimmingPaintV175Enabled() &&
object_.IsSVGForeignObject()) {
state.clip_rect = ToClipRect(ToLayoutBox(object_).FrameRect());
} else if (object_.IsBox()) {
state.clip_rect = ToClipRect(ToLayoutBox(object_).OverflowClipRect(
context_.current.paint_offset));
state.clip_rect_excluding_overlay_scrollbars =
ToClipRect(ToLayoutBox(object_).OverflowClipRect(
context_.current.paint_offset,
kExcludeOverlayScrollbarSizeForHitTesting));
} else {
DCHECK(object_.IsSVGViewportContainer());
const auto& viewport_container = ToLayoutSVGViewportContainer(object_);
state.clip_rect = FloatRoundedRect(
viewport_container.LocalToSVGParentTransform().Inverse().MapRect(
viewport_container.Viewport()));
}
const ClipPaintPropertyNode* existing = properties_->OverflowClip();
bool equal_ignoring_hit_test_rects =
!!existing &&
existing->EqualIgnoringHitTestRects(context_.current.clip, state);
OnUpdateClip(properties_->UpdateOverflowClip(*context_.current.clip,
std::move(state)),
equal_ignoring_hit_test_rects);
} else {
OnClearClip(properties_->ClearOverflowClip());
}
}
if (auto* overflow_clip = properties_->OverflowClip())
context_.current.clip = overflow_clip;
}
| 171,800 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static BOOLEAN flush_incoming_que_on_wr_signal_l(l2cap_socket *sock)
{
uint8_t *buf;
uint32_t len;
while (packet_get_head_l(sock, &buf, &len)) {
int sent = send(sock->our_fd, buf, len, MSG_DONTWAIT);
if (sent == (signed)len)
osi_free(buf);
else if (sent >= 0) {
packet_put_head_l(sock, buf + sent, len - sent);
osi_free(buf);
if (!sent) /* special case if other end not keeping up */
return TRUE;
}
else {
packet_put_head_l(sock, buf, len);
osi_free(buf);
return errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN;
}
}
return FALSE;
}
Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
CWE ID: CWE-284 | static BOOLEAN flush_incoming_que_on_wr_signal_l(l2cap_socket *sock)
{
uint8_t *buf;
uint32_t len;
while (packet_get_head_l(sock, &buf, &len)) {
int sent = TEMP_FAILURE_RETRY(send(sock->our_fd, buf, len, MSG_DONTWAIT));
if (sent == (signed)len)
osi_free(buf);
else if (sent >= 0) {
packet_put_head_l(sock, buf + sent, len - sent);
osi_free(buf);
if (!sent) /* special case if other end not keeping up */
return TRUE;
}
else {
packet_put_head_l(sock, buf, len);
osi_free(buf);
return errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN;
}
}
return FALSE;
}
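The DoS fix above wraps send() in TEMP_FAILURE_RETRY so that a signal delivered to the Bluetooth process cannot make the call fail with EINTR and disturb the outgoing packet queue. Roughly, the macro (provided by Bionic/glibc; the exact upstream definition may differ) retries the expression as long as it fails with EINTR:

#include <errno.h>

/* Approximation of TEMP_FAILURE_RETRY for illustration; it relies on the GNU
 * statement-expression extension and the real Bionic/glibc definition may
 * differ in detail.
 */
#ifndef TEMP_FAILURE_RETRY
#define TEMP_FAILURE_RETRY(exp)                   \
    ({                                            \
        __typeof__(exp) _rc;                      \
        do {                                      \
            _rc = (exp);                          \
        } while (_rc == -1 && errno == EINTR);    \
        _rc;                                      \
    })
#endif

/* Usage mirrors the fixed line above:
 *     int sent = TEMP_FAILURE_RETRY(send(sock->our_fd, buf, len, MSG_DONTWAIT));
 */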
| 173,454 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: WORD32 ih264d_video_decode(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op)
{
/* ! */
dec_struct_t * ps_dec = (dec_struct_t *)(dec_hdl->pv_codec_handle);
WORD32 i4_err_status = 0;
UWORD8 *pu1_buf = NULL;
WORD32 buflen;
UWORD32 u4_max_ofst, u4_length_of_start_code = 0;
UWORD32 bytes_consumed = 0;
UWORD32 cur_slice_is_nonref = 0;
UWORD32 u4_next_is_aud;
UWORD32 u4_first_start_code_found = 0;
WORD32 ret = 0,api_ret_value = IV_SUCCESS;
WORD32 header_data_left = 0,frame_data_left = 0;
UWORD8 *pu1_bitstrm_buf;
ivd_video_decode_ip_t *ps_dec_ip;
ivd_video_decode_op_t *ps_dec_op;
ithread_set_name((void*)"Parse_thread");
ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip;
ps_dec_op = (ivd_video_decode_op_t *)pv_api_op;
ps_dec->pv_dec_out = ps_dec_op;
if(ps_dec->init_done != 1)
{
return IV_FAIL;
}
/*Data memory barries instruction,so that bitstream write by the application is complete*/
DATA_SYNC();
if(0 == ps_dec->u1_flushfrm)
{
if(ps_dec_ip->pv_stream_buffer == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->u4_num_Bytes <= 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV;
return IV_FAIL;
}
}
ps_dec->u1_pic_decode_done = 0;
ps_dec_op->u4_num_bytes_consumed = 0;
ps_dec->ps_out_buffer = NULL;
if(ps_dec_ip->u4_size
>= offsetof(ivd_video_decode_ip_t, s_out_buffer))
ps_dec->ps_out_buffer = &ps_dec_ip->s_out_buffer;
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 0;
ps_dec->s_disp_op.u4_error_code = 1;
ps_dec->u4_fmt_conv_num_rows = FMT_CONV_NUM_ROWS;
if(0 == ps_dec->u4_share_disp_buf
&& ps_dec->i4_decode_header == 0)
{
UWORD32 i;
if(ps_dec->ps_out_buffer->u4_num_bufs == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS;
return IV_FAIL;
}
for(i = 0; i < ps_dec->ps_out_buffer->u4_num_bufs; i++)
{
if(ps_dec->ps_out_buffer->pu1_bufs[i] == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL;
return IV_FAIL;
}
if(ps_dec->ps_out_buffer->u4_min_out_buf_size[i] == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |=
IVD_DISP_FRM_ZERO_OP_BUF_SIZE;
return IV_FAIL;
}
}
}
if(ps_dec->u4_total_frames_decoded >= NUM_FRAMES_LIMIT)
{
ps_dec_op->u4_error_code = ERROR_FRAME_LIMIT_OVER;
return IV_FAIL;
}
/* ! */
ps_dec->u4_ts = ps_dec_ip->u4_ts;
ps_dec_op->u4_error_code = 0;
ps_dec_op->e_pic_type = -1;
ps_dec_op->u4_output_present = 0;
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec->i4_frametype = -1;
ps_dec->i4_content_type = -1;
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
{
if((TOP_FIELD_ONLY | BOT_FIELD_ONLY) == ps_dec->u1_top_bottom_decoded)
{
ps_dec->u1_top_bottom_decoded = 0;
}
}
ps_dec->u4_slice_start_code_found = 0;
/* In case the deocder is not in flush mode(in shared mode),
then decoder has to pick up a buffer to write current frame.
Check if a frame is available in such cases */
if(ps_dec->u1_init_dec_flag == 1 && ps_dec->u4_share_disp_buf == 1
&& ps_dec->u1_flushfrm == 0)
{
UWORD32 i;
WORD32 disp_avail = 0, free_id;
/* Check if at least one buffer is available with the codec */
/* If not then return to application with error */
for(i = 0; i < ps_dec->u1_pic_bufs; i++)
{
if(0 == ps_dec->u4_disp_buf_mapping[i]
|| 1 == ps_dec->u4_disp_buf_to_be_freed[i])
{
disp_avail = 1;
break;
}
}
if(0 == disp_avail)
{
/* If something is queued for display wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
while(1)
{
pic_buffer_t *ps_pic_buf;
ps_pic_buf = (pic_buffer_t *)ih264_buf_mgr_get_next_free(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &free_id);
if(ps_pic_buf == NULL)
{
UWORD32 i, display_queued = 0;
/* check if any buffer was given for display which is not returned yet */
for(i = 0; i < (MAX_DISP_BUFS_NEW); i++)
{
if(0 != ps_dec->u4_disp_buf_mapping[i])
{
display_queued = 1;
break;
}
}
/* If some buffer is queued for display, then codec has to singal an error and wait
for that buffer to be returned.
If nothing is queued for display then codec has ownership of all display buffers
and it can reuse any of the existing buffers and continue decoding */
if(1 == display_queued)
{
/* If something is queued for display wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
}
else
{
/* If the buffer is with display, then mark it as in use and then look for a buffer again */
if(1 == ps_dec->u4_disp_buf_mapping[free_id])
{
ih264_buf_mgr_set_status(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
}
else
{
/**
* Found a free buffer for present call. Release it now.
* Will be again obtained later.
*/
ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
break;
}
}
}
}
if(ps_dec->u1_flushfrm && ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
ps_dec->u4_output_present = 1;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
ps_dec_op->u4_new_seq = 0;
ps_dec_op->u4_output_present = ps_dec->u4_output_present;
ps_dec_op->u4_progressive_frame_flag =
ps_dec->s_disp_op.u4_progressive_frame_flag;
ps_dec_op->e_output_format =
ps_dec->s_disp_op.e_output_format;
ps_dec_op->s_disp_frm_buf = ps_dec->s_disp_op.s_disp_frm_buf;
ps_dec_op->e4_fld_type = ps_dec->s_disp_op.e4_fld_type;
ps_dec_op->u4_ts = ps_dec->s_disp_op.u4_ts;
ps_dec_op->u4_disp_buf_id = ps_dec->s_disp_op.u4_disp_buf_id;
/*In the case of flush ,since no frame is decoded set pic type as invalid*/
ps_dec_op->u4_is_ref_flag = -1;
ps_dec_op->e_pic_type = IV_NA_FRAME;
ps_dec_op->u4_frame_decoded_flag = 0;
if(0 == ps_dec->s_disp_op.u4_error_code)
{
return (IV_SUCCESS);
}
else
return (IV_FAIL);
}
if(ps_dec->u1_res_changed == 1)
{
/*if resolution has changed and all buffers have been flushed, reset decoder*/
ih264d_init_decoder(ps_dec);
}
ps_dec->u4_prev_nal_skipped = 0;
ps_dec->u2_cur_mb_addr = 0;
ps_dec->u2_total_mbs_coded = 0;
ps_dec->u2_cur_slice_num = 0;
ps_dec->cur_dec_mb_num = 0;
ps_dec->cur_recon_mb_num = 0;
ps_dec->u4_first_slice_in_pic = 2;
ps_dec->u1_slice_header_done = 0;
ps_dec->u1_dangling_field = 0;
ps_dec->u4_dec_thread_created = 0;
ps_dec->u4_bs_deblk_thread_created = 0;
ps_dec->u4_cur_bs_mb_num = 0;
DEBUG_THREADS_PRINTF(" Starting process call\n");
ps_dec->u4_pic_buf_got = 0;
do
{
WORD32 buf_size;
pu1_buf = (UWORD8*)ps_dec_ip->pv_stream_buffer
+ ps_dec_op->u4_num_bytes_consumed;
u4_max_ofst = ps_dec_ip->u4_num_Bytes
- ps_dec_op->u4_num_bytes_consumed;
/* If dynamic bitstream buffer is not allocated and
* header decode is done, then allocate dynamic bitstream buffer
*/
if((NULL == ps_dec->pu1_bits_buf_dynamic) &&
(ps_dec->i4_header_decoded & 1))
{
WORD32 size;
void *pv_buf;
void *pv_mem_ctxt = ps_dec->pv_mem_ctxt;
size = MAX(256000, ps_dec->u2_pic_wd * ps_dec->u2_pic_ht * 3 / 2);
pv_buf = ps_dec->pf_aligned_alloc(pv_mem_ctxt, 128, size);
RETURN_IF((NULL == pv_buf), IV_FAIL);
ps_dec->pu1_bits_buf_dynamic = pv_buf;
ps_dec->u4_dynamic_bits_buf_size = size;
}
if(ps_dec->pu1_bits_buf_dynamic)
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_dynamic;
buf_size = ps_dec->u4_dynamic_bits_buf_size;
}
else
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_static;
buf_size = ps_dec->u4_static_bits_buf_size;
}
u4_next_is_aud = 0;
buflen = ih264d_find_start_code(pu1_buf, 0, u4_max_ofst,
&u4_length_of_start_code,
&u4_next_is_aud);
if(buflen == -1)
buflen = 0;
/* Ignore bytes beyond the allocated size of intermediate buffer */
buflen = MIN(buflen, buf_size);
bytes_consumed = buflen + u4_length_of_start_code;
ps_dec_op->u4_num_bytes_consumed += bytes_consumed;
{
UWORD8 u1_firstbyte, u1_nal_ref_idc;
if(ps_dec->i4_app_skip_mode == IVD_SKIP_B)
{
u1_firstbyte = *(pu1_buf + u4_length_of_start_code);
u1_nal_ref_idc = (UWORD8)(NAL_REF_IDC(u1_firstbyte));
if(u1_nal_ref_idc == 0)
{
/*skip non reference frames*/
cur_slice_is_nonref = 1;
continue;
}
else
{
if(1 == cur_slice_is_nonref)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_num_bytes_consumed -=
bytes_consumed;
ps_dec_op->e_pic_type = IV_B_FRAME;
ps_dec_op->u4_error_code =
IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size =
sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
}
}
}
if(buflen)
{
memcpy(pu1_bitstrm_buf, pu1_buf + u4_length_of_start_code,
buflen);
/* Decoder may read extra 8 bytes near end of the frame */
if((buflen + 8) < buf_size)
{
memset(pu1_bitstrm_buf + buflen, 0, 8);
}
u4_first_start_code_found = 1;
}
else
{
/*start code not found*/
if(u4_first_start_code_found == 0)
{
/*no start codes found in current process call*/
ps_dec->i4_error_code = ERROR_START_CODE_NOT_FOUND;
ps_dec_op->u4_error_code |= 1 << IVD_INSUFFICIENTDATA;
if(ps_dec->u4_pic_buf_got == 0)
{
ih264d_fill_output_struct_from_context(ps_dec,
ps_dec_op);
ps_dec_op->u4_error_code = ps_dec->i4_error_code;
ps_dec_op->u4_frame_decoded_flag = 0;
return (IV_FAIL);
}
else
{
ps_dec->u1_pic_decode_done = 1;
continue;
}
}
else
{
/* a start code has already been found earlier in the same process call*/
frame_data_left = 0;
continue;
}
}
ps_dec->u4_return_to_app = 0;
ret = ih264d_parse_nal_unit(dec_hdl, ps_dec_op,
pu1_bitstrm_buf, buflen);
if(ret != OK)
{
UWORD32 error = ih264d_map_error(ret);
ps_dec_op->u4_error_code = error | ret;
api_ret_value = IV_FAIL;
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T))
{
break;
}
if((ret == ERROR_INCOMPLETE_FRAME) || (ret == ERROR_DANGLING_FIELD_IN_PIC))
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
api_ret_value = IV_FAIL;
break;
}
if(ret == ERROR_IN_LAST_SLICE_OF_PIC)
{
api_ret_value = IV_FAIL;
break;
}
}
if(ps_dec->u4_return_to_app)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
header_data_left = ((ps_dec->i4_decode_header == 1)
&& (ps_dec->i4_header_decoded != 3)
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
frame_data_left = (((ps_dec->i4_decode_header == 0)
&& ((ps_dec->u1_pic_decode_done == 0)
|| (u4_next_is_aud == 1)))
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
}
while(( header_data_left == 1)||(frame_data_left == 1));
if((ps_dec->u4_slice_start_code_found == 1)
&& (ret != IVD_MEM_ALLOC_FAILED)
&& ps_dec->u2_total_mbs_coded < ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
{
WORD32 num_mb_skipped;
WORD32 prev_slice_err;
pocstruct_t temp_poc;
WORD32 ret1;
num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
- ps_dec->u2_total_mbs_coded;
if(ps_dec->u4_first_slice_in_pic && (ps_dec->u4_pic_buf_got == 0))
prev_slice_err = 1;
else
prev_slice_err = 2;
ret1 = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, ps_dec->u1_nal_unit_type == IDR_SLICE_NAL, ps_dec->ps_cur_slice->u2_frame_num,
&temp_poc, prev_slice_err);
if((ret1 == ERROR_UNAVAIL_PICBUF_T) || (ret1 == ERROR_UNAVAIL_MVBUF_T))
{
return IV_FAIL;
}
}
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T))
{
/* signal the decode thread */
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet */
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
/* dont consume bitstream for change in resolution case */
if(ret == IVD_RES_CHANGED)
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
}
return IV_FAIL;
}
if(ps_dec->u1_separate_parse)
{
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_num_cores == 2)
{
/*do deblocking of all mbs*/
if((ps_dec->u4_nmb_deblk == 0) &&(ps_dec->u4_start_recon_deblk == 1) && (ps_dec->ps_cur_sps->u1_mb_aff_flag == 0))
{
UWORD32 u4_num_mbs,u4_max_addr;
tfr_ctxt_t s_tfr_ctxt;
tfr_ctxt_t *ps_tfr_cxt = &s_tfr_ctxt;
pad_mgr_t *ps_pad_mgr = &ps_dec->s_pad_mgr;
/*BS is done for all mbs while parsing*/
u4_max_addr = (ps_dec->u2_frm_wd_in_mbs * ps_dec->u2_frm_ht_in_mbs) - 1;
ps_dec->u4_cur_bs_mb_num = u4_max_addr + 1;
ih264d_init_deblk_tfr_ctxt(ps_dec, ps_pad_mgr, ps_tfr_cxt,
ps_dec->u2_frm_wd_in_mbs, 0);
u4_num_mbs = u4_max_addr
- ps_dec->u4_cur_deblk_mb_num + 1;
DEBUG_PERF_PRINTF("mbs left for deblocking= %d \n",u4_num_mbs);
if(u4_num_mbs != 0)
ih264d_check_mb_map_deblk(ps_dec, u4_num_mbs,
ps_tfr_cxt,1);
ps_dec->u4_start_recon_deblk = 0;
}
}
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
}
DATA_SYNC();
if((ps_dec_op->u4_error_code & 0xff)
!= ERROR_DYNAMIC_RESOLUTION_NOT_SUPPORTED)
{
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
}
if(ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->i4_decode_header == 1 && ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->u4_prev_nal_skipped)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
if((ps_dec->u4_slice_start_code_found == 1)
&& (ERROR_DANGLING_FIELD_IN_PIC != i4_err_status))
{
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
if(ps_dec->ps_cur_slice->u1_field_pic_flag)
{
if(1 == ps_dec->ps_cur_slice->u1_bottom_field_flag)
{
ps_dec->u1_top_bottom_decoded |= BOT_FIELD_ONLY;
}
else
{
ps_dec->u1_top_bottom_decoded |= TOP_FIELD_ONLY;
}
}
/* if new frame in not found (if we are still getting slices from previous frame)
* ih264d_deblock_display is not called. Such frames will not be added to reference /display
*/
if((ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) == 0)
{
/* Calling Function to deblock Picture and Display */
ret = ih264d_deblock_display(ps_dec);
if(ret != 0)
{
return IV_FAIL;
}
}
/*set to complete ,as we dont support partial frame decode*/
if(ps_dec->i4_header_decoded == 3)
{
ps_dec->u2_total_mbs_coded = ps_dec->ps_cur_sps->u2_max_mb_addr + 1;
}
/*Update the i4_frametype at the end of picture*/
if(ps_dec->ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL)
{
ps_dec->i4_frametype = IV_IDR_FRAME;
}
else if(ps_dec->i4_pic_type == B_SLICE)
{
ps_dec->i4_frametype = IV_B_FRAME;
}
else if(ps_dec->i4_pic_type == P_SLICE)
{
ps_dec->i4_frametype = IV_P_FRAME;
}
else if(ps_dec->i4_pic_type == I_SLICE)
{
ps_dec->i4_frametype = IV_I_FRAME;
}
else
{
H264_DEC_DEBUG_PRINT("Shouldn't come here\n");
}
ps_dec->i4_content_type = ps_dec->ps_cur_slice->u1_field_pic_flag;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded + 2;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded
- ps_dec->ps_cur_slice->u1_field_pic_flag;
}
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
{
/* In case the decoder is configured to run in low delay mode,
* then get display buffer and then format convert.
* Note in this mode, format conversion does not run paralelly in a thread and adds to the codec cycles
*/
if((IVD_DECODE_FRAME_OUT == ps_dec->e_frm_out_mode)
&& ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 1;
}
}
ih264d_fill_output_struct_from_context(ps_dec, ps_dec_op);
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_output_present &&
(ps_dec->u4_fmt_conv_cur_row < ps_dec->s_disp_frame_info.u4_y_ht))
{
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht
- ps_dec->u4_fmt_conv_cur_row;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
}
if(ps_dec->i4_decode_header == 1 && (ps_dec->i4_header_decoded & 1) == 1)
{
ps_dec_op->u4_progressive_frame_flag = 1;
if((NULL != ps_dec->ps_cur_sps) && (1 == (ps_dec->ps_cur_sps->u1_is_valid)))
{
if((0 == ps_dec->ps_sps->u1_frame_mbs_only_flag)
&& (0 == ps_dec->ps_sps->u1_mb_aff_flag))
ps_dec_op->u4_progressive_frame_flag = 0;
}
}
/*Data memory barrier instruction,so that yuv write by the library is complete*/
DATA_SYNC();
H264_DEC_DEBUG_PRINT("The num bytes consumed: %d\n",
ps_dec_op->u4_num_bytes_consumed);
return api_ret_value;
}
Commit Message: Decoder: Initialize slice parameters before concealing error MBs
Also memset ps_dec_op structure to zero.
For error input, this ensures dimensions are initialized to zero
Bug: 28165661
Change-Id: I66eb2ddc5e02e74b7ff04da5f749443920f37141
CWE ID: CWE-20 | WORD32 ih264d_video_decode(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op)
{
/* ! */
dec_struct_t * ps_dec = (dec_struct_t *)(dec_hdl->pv_codec_handle);
WORD32 i4_err_status = 0;
UWORD8 *pu1_buf = NULL;
WORD32 buflen;
UWORD32 u4_max_ofst, u4_length_of_start_code = 0;
UWORD32 bytes_consumed = 0;
UWORD32 cur_slice_is_nonref = 0;
UWORD32 u4_next_is_aud;
UWORD32 u4_first_start_code_found = 0;
WORD32 ret = 0,api_ret_value = IV_SUCCESS;
WORD32 header_data_left = 0,frame_data_left = 0;
UWORD8 *pu1_bitstrm_buf;
ivd_video_decode_ip_t *ps_dec_ip;
ivd_video_decode_op_t *ps_dec_op;
ithread_set_name((void*)"Parse_thread");
ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip;
ps_dec_op = (ivd_video_decode_op_t *)pv_api_op;
{
UWORD32 u4_size;
u4_size = ps_dec_op->u4_size;
memset(ps_dec_op, 0, sizeof(ivd_video_decode_op_t));
ps_dec_op->u4_size = u4_size;
}
ps_dec->pv_dec_out = ps_dec_op;
if(ps_dec->init_done != 1)
{
return IV_FAIL;
}
/*Data memory barries instruction,so that bitstream write by the application is complete*/
DATA_SYNC();
if(0 == ps_dec->u1_flushfrm)
{
if(ps_dec_ip->pv_stream_buffer == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL;
return IV_FAIL;
}
if(ps_dec_ip->u4_num_Bytes <= 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV;
return IV_FAIL;
}
}
ps_dec->u1_pic_decode_done = 0;
ps_dec_op->u4_num_bytes_consumed = 0;
ps_dec->ps_out_buffer = NULL;
if(ps_dec_ip->u4_size
>= offsetof(ivd_video_decode_ip_t, s_out_buffer))
ps_dec->ps_out_buffer = &ps_dec_ip->s_out_buffer;
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 0;
ps_dec->s_disp_op.u4_error_code = 1;
ps_dec->u4_fmt_conv_num_rows = FMT_CONV_NUM_ROWS;
if(0 == ps_dec->u4_share_disp_buf
&& ps_dec->i4_decode_header == 0)
{
UWORD32 i;
if(ps_dec->ps_out_buffer->u4_num_bufs == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS;
return IV_FAIL;
}
for(i = 0; i < ps_dec->ps_out_buffer->u4_num_bufs; i++)
{
if(ps_dec->ps_out_buffer->pu1_bufs[i] == NULL)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL;
return IV_FAIL;
}
if(ps_dec->ps_out_buffer->u4_min_out_buf_size[i] == 0)
{
ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM;
ps_dec_op->u4_error_code |=
IVD_DISP_FRM_ZERO_OP_BUF_SIZE;
return IV_FAIL;
}
}
}
if(ps_dec->u4_total_frames_decoded >= NUM_FRAMES_LIMIT)
{
ps_dec_op->u4_error_code = ERROR_FRAME_LIMIT_OVER;
return IV_FAIL;
}
/* ! */
ps_dec->u4_ts = ps_dec_ip->u4_ts;
ps_dec_op->u4_error_code = 0;
ps_dec_op->e_pic_type = -1;
ps_dec_op->u4_output_present = 0;
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec->i4_frametype = -1;
ps_dec->i4_content_type = -1;
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
{
if((TOP_FIELD_ONLY | BOT_FIELD_ONLY) == ps_dec->u1_top_bottom_decoded)
{
ps_dec->u1_top_bottom_decoded = 0;
}
}
ps_dec->u4_slice_start_code_found = 0;
/* In case the deocder is not in flush mode(in shared mode),
then decoder has to pick up a buffer to write current frame.
Check if a frame is available in such cases */
if(ps_dec->u1_init_dec_flag == 1 && ps_dec->u4_share_disp_buf == 1
&& ps_dec->u1_flushfrm == 0)
{
UWORD32 i;
WORD32 disp_avail = 0, free_id;
/* Check if at least one buffer is available with the codec */
/* If not then return to application with error */
for(i = 0; i < ps_dec->u1_pic_bufs; i++)
{
if(0 == ps_dec->u4_disp_buf_mapping[i]
|| 1 == ps_dec->u4_disp_buf_to_be_freed[i])
{
disp_avail = 1;
break;
}
}
if(0 == disp_avail)
{
/* If something is queued for display wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
while(1)
{
pic_buffer_t *ps_pic_buf;
ps_pic_buf = (pic_buffer_t *)ih264_buf_mgr_get_next_free(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &free_id);
if(ps_pic_buf == NULL)
{
UWORD32 i, display_queued = 0;
/* check if any buffer was given for display which is not returned yet */
for(i = 0; i < (MAX_DISP_BUFS_NEW); i++)
{
if(0 != ps_dec->u4_disp_buf_mapping[i])
{
display_queued = 1;
break;
}
}
/* If some buffer is queued for display, then codec has to singal an error and wait
for that buffer to be returned.
If nothing is queued for display then codec has ownership of all display buffers
and it can reuse any of the existing buffers and continue decoding */
if(1 == display_queued)
{
/* If something is queued for display wait for that buffer to be returned */
ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
return (IV_FAIL);
}
}
else
{
/* If the buffer is with display, then mark it as in use and then look for a buffer again */
if(1 == ps_dec->u4_disp_buf_mapping[free_id])
{
ih264_buf_mgr_set_status(
(buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
}
else
{
/**
* Found a free buffer for present call. Release it now.
* Will be again obtained later.
*/
ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
free_id,
BUF_MGR_IO);
break;
}
}
}
}
if(ps_dec->u1_flushfrm && ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
ps_dec->u4_output_present = 1;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
ps_dec_op->u4_new_seq = 0;
ps_dec_op->u4_output_present = ps_dec->u4_output_present;
ps_dec_op->u4_progressive_frame_flag =
ps_dec->s_disp_op.u4_progressive_frame_flag;
ps_dec_op->e_output_format =
ps_dec->s_disp_op.e_output_format;
ps_dec_op->s_disp_frm_buf = ps_dec->s_disp_op.s_disp_frm_buf;
ps_dec_op->e4_fld_type = ps_dec->s_disp_op.e4_fld_type;
ps_dec_op->u4_ts = ps_dec->s_disp_op.u4_ts;
ps_dec_op->u4_disp_buf_id = ps_dec->s_disp_op.u4_disp_buf_id;
/*In the case of flush ,since no frame is decoded set pic type as invalid*/
ps_dec_op->u4_is_ref_flag = -1;
ps_dec_op->e_pic_type = IV_NA_FRAME;
ps_dec_op->u4_frame_decoded_flag = 0;
if(0 == ps_dec->s_disp_op.u4_error_code)
{
return (IV_SUCCESS);
}
else
return (IV_FAIL);
}
if(ps_dec->u1_res_changed == 1)
{
/*if resolution has changed and all buffers have been flushed, reset decoder*/
ih264d_init_decoder(ps_dec);
}
ps_dec->u4_prev_nal_skipped = 0;
ps_dec->u2_cur_mb_addr = 0;
ps_dec->u2_total_mbs_coded = 0;
ps_dec->u2_cur_slice_num = 0;
ps_dec->cur_dec_mb_num = 0;
ps_dec->cur_recon_mb_num = 0;
ps_dec->u4_first_slice_in_pic = 2;
ps_dec->u1_slice_header_done = 0;
ps_dec->u1_dangling_field = 0;
ps_dec->u4_dec_thread_created = 0;
ps_dec->u4_bs_deblk_thread_created = 0;
ps_dec->u4_cur_bs_mb_num = 0;
DEBUG_THREADS_PRINTF(" Starting process call\n");
ps_dec->u4_pic_buf_got = 0;
do
{
WORD32 buf_size;
pu1_buf = (UWORD8*)ps_dec_ip->pv_stream_buffer
+ ps_dec_op->u4_num_bytes_consumed;
u4_max_ofst = ps_dec_ip->u4_num_Bytes
- ps_dec_op->u4_num_bytes_consumed;
/* If dynamic bitstream buffer is not allocated and
* header decode is done, then allocate dynamic bitstream buffer
*/
if((NULL == ps_dec->pu1_bits_buf_dynamic) &&
(ps_dec->i4_header_decoded & 1))
{
WORD32 size;
void *pv_buf;
void *pv_mem_ctxt = ps_dec->pv_mem_ctxt;
size = MAX(256000, ps_dec->u2_pic_wd * ps_dec->u2_pic_ht * 3 / 2);
pv_buf = ps_dec->pf_aligned_alloc(pv_mem_ctxt, 128, size);
RETURN_IF((NULL == pv_buf), IV_FAIL);
ps_dec->pu1_bits_buf_dynamic = pv_buf;
ps_dec->u4_dynamic_bits_buf_size = size;
}
if(ps_dec->pu1_bits_buf_dynamic)
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_dynamic;
buf_size = ps_dec->u4_dynamic_bits_buf_size;
}
else
{
pu1_bitstrm_buf = ps_dec->pu1_bits_buf_static;
buf_size = ps_dec->u4_static_bits_buf_size;
}
u4_next_is_aud = 0;
buflen = ih264d_find_start_code(pu1_buf, 0, u4_max_ofst,
&u4_length_of_start_code,
&u4_next_is_aud);
if(buflen == -1)
buflen = 0;
/* Ignore bytes beyond the allocated size of intermediate buffer */
buflen = MIN(buflen, buf_size);
bytes_consumed = buflen + u4_length_of_start_code;
ps_dec_op->u4_num_bytes_consumed += bytes_consumed;
{
UWORD8 u1_firstbyte, u1_nal_ref_idc;
if(ps_dec->i4_app_skip_mode == IVD_SKIP_B)
{
u1_firstbyte = *(pu1_buf + u4_length_of_start_code);
u1_nal_ref_idc = (UWORD8)(NAL_REF_IDC(u1_firstbyte));
if(u1_nal_ref_idc == 0)
{
/*skip non reference frames*/
cur_slice_is_nonref = 1;
continue;
}
else
{
if(1 == cur_slice_is_nonref)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_num_bytes_consumed -=
bytes_consumed;
ps_dec_op->e_pic_type = IV_B_FRAME;
ps_dec_op->u4_error_code =
IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1
<< IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size =
sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
}
}
}
if(buflen)
{
memcpy(pu1_bitstrm_buf, pu1_buf + u4_length_of_start_code,
buflen);
/* Decoder may read extra 8 bytes near end of the frame */
if((buflen + 8) < buf_size)
{
memset(pu1_bitstrm_buf + buflen, 0, 8);
}
u4_first_start_code_found = 1;
}
else
{
/*start code not found*/
if(u4_first_start_code_found == 0)
{
/*no start codes found in current process call*/
ps_dec->i4_error_code = ERROR_START_CODE_NOT_FOUND;
ps_dec_op->u4_error_code |= 1 << IVD_INSUFFICIENTDATA;
if(ps_dec->u4_pic_buf_got == 0)
{
ih264d_fill_output_struct_from_context(ps_dec,
ps_dec_op);
ps_dec_op->u4_error_code = ps_dec->i4_error_code;
ps_dec_op->u4_frame_decoded_flag = 0;
return (IV_FAIL);
}
else
{
ps_dec->u1_pic_decode_done = 1;
continue;
}
}
else
{
/* a start code has already been found earlier in the same process call*/
frame_data_left = 0;
continue;
}
}
ps_dec->u4_return_to_app = 0;
ret = ih264d_parse_nal_unit(dec_hdl, ps_dec_op,
pu1_bitstrm_buf, buflen);
if(ret != OK)
{
UWORD32 error = ih264d_map_error(ret);
ps_dec_op->u4_error_code = error | ret;
api_ret_value = IV_FAIL;
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T))
{
break;
}
if((ret == ERROR_INCOMPLETE_FRAME) || (ret == ERROR_DANGLING_FIELD_IN_PIC))
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
api_ret_value = IV_FAIL;
break;
}
if(ret == ERROR_IN_LAST_SLICE_OF_PIC)
{
api_ret_value = IV_FAIL;
break;
}
}
if(ps_dec->u4_return_to_app)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
header_data_left = ((ps_dec->i4_decode_header == 1)
&& (ps_dec->i4_header_decoded != 3)
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
frame_data_left = (((ps_dec->i4_decode_header == 0)
&& ((ps_dec->u1_pic_decode_done == 0)
|| (u4_next_is_aud == 1)))
&& (ps_dec_op->u4_num_bytes_consumed
< ps_dec_ip->u4_num_Bytes));
}
while(( header_data_left == 1)||(frame_data_left == 1));
if((ps_dec->u4_slice_start_code_found == 1)
&& (ret != IVD_MEM_ALLOC_FAILED)
&& ps_dec->u2_total_mbs_coded < ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
{
WORD32 num_mb_skipped;
WORD32 prev_slice_err;
pocstruct_t temp_poc;
WORD32 ret1;
num_mb_skipped = (ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
- ps_dec->u2_total_mbs_coded;
if(ps_dec->u4_first_slice_in_pic && (ps_dec->u4_pic_buf_got == 0))
prev_slice_err = 1;
else
prev_slice_err = 2;
ret1 = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, ps_dec->u1_nal_unit_type == IDR_SLICE_NAL, ps_dec->ps_cur_slice->u2_frame_num,
&temp_poc, prev_slice_err);
if((ret1 == ERROR_UNAVAIL_PICBUF_T) || (ret1 == ERROR_UNAVAIL_MVBUF_T))
{
return IV_FAIL;
}
}
if((ret == IVD_RES_CHANGED)
|| (ret == IVD_MEM_ALLOC_FAILED)
|| (ret == ERROR_UNAVAIL_PICBUF_T)
|| (ret == ERROR_UNAVAIL_MVBUF_T))
{
/* signal the decode thread */
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet */
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
/* dont consume bitstream for change in resolution case */
if(ret == IVD_RES_CHANGED)
{
ps_dec_op->u4_num_bytes_consumed -= bytes_consumed;
}
return IV_FAIL;
}
if(ps_dec->u1_separate_parse)
{
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_num_cores == 2)
{
/*do deblocking of all mbs*/
if((ps_dec->u4_nmb_deblk == 0) &&(ps_dec->u4_start_recon_deblk == 1) && (ps_dec->ps_cur_sps->u1_mb_aff_flag == 0))
{
UWORD32 u4_num_mbs,u4_max_addr;
tfr_ctxt_t s_tfr_ctxt;
tfr_ctxt_t *ps_tfr_cxt = &s_tfr_ctxt;
pad_mgr_t *ps_pad_mgr = &ps_dec->s_pad_mgr;
/*BS is done for all mbs while parsing*/
u4_max_addr = (ps_dec->u2_frm_wd_in_mbs * ps_dec->u2_frm_ht_in_mbs) - 1;
ps_dec->u4_cur_bs_mb_num = u4_max_addr + 1;
ih264d_init_deblk_tfr_ctxt(ps_dec, ps_pad_mgr, ps_tfr_cxt,
ps_dec->u2_frm_wd_in_mbs, 0);
u4_num_mbs = u4_max_addr
- ps_dec->u4_cur_deblk_mb_num + 1;
DEBUG_PERF_PRINTF("mbs left for deblocking= %d \n",u4_num_mbs);
if(u4_num_mbs != 0)
ih264d_check_mb_map_deblk(ps_dec, u4_num_mbs,
ps_tfr_cxt,1);
ps_dec->u4_start_recon_deblk = 0;
}
}
/*signal the decode thread*/
ih264d_signal_decode_thread(ps_dec);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
}
DATA_SYNC();
if((ps_dec_op->u4_error_code & 0xff)
!= ERROR_DYNAMIC_RESOLUTION_NOT_SUPPORTED)
{
ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width;
ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height;
}
if(ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->i4_decode_header == 1 && ps_dec->i4_header_decoded != 3)
{
ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA);
}
if(ps_dec->u4_prev_nal_skipped)
{
/*We have encountered a referenced frame,return to app*/
ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED;
ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM);
ps_dec_op->u4_frame_decoded_flag = 0;
ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t);
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
return (IV_FAIL);
}
if((ps_dec->u4_slice_start_code_found == 1)
&& (ERROR_DANGLING_FIELD_IN_PIC != i4_err_status))
{
/*
* For field pictures, set the bottom and top picture decoded u4_flag correctly.
*/
if(ps_dec->ps_cur_slice->u1_field_pic_flag)
{
if(1 == ps_dec->ps_cur_slice->u1_bottom_field_flag)
{
ps_dec->u1_top_bottom_decoded |= BOT_FIELD_ONLY;
}
else
{
ps_dec->u1_top_bottom_decoded |= TOP_FIELD_ONLY;
}
}
/* if new frame in not found (if we are still getting slices from previous frame)
* ih264d_deblock_display is not called. Such frames will not be added to reference /display
*/
if((ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) == 0)
{
/* Calling Function to deblock Picture and Display */
ret = ih264d_deblock_display(ps_dec);
if(ret != 0)
{
return IV_FAIL;
}
}
/*set to complete ,as we dont support partial frame decode*/
if(ps_dec->i4_header_decoded == 3)
{
ps_dec->u2_total_mbs_coded = ps_dec->ps_cur_sps->u2_max_mb_addr + 1;
}
/*Update the i4_frametype at the end of picture*/
if(ps_dec->ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL)
{
ps_dec->i4_frametype = IV_IDR_FRAME;
}
else if(ps_dec->i4_pic_type == B_SLICE)
{
ps_dec->i4_frametype = IV_B_FRAME;
}
else if(ps_dec->i4_pic_type == P_SLICE)
{
ps_dec->i4_frametype = IV_P_FRAME;
}
else if(ps_dec->i4_pic_type == I_SLICE)
{
ps_dec->i4_frametype = IV_I_FRAME;
}
else
{
H264_DEC_DEBUG_PRINT("Shouldn't come here\n");
}
ps_dec->i4_content_type = ps_dec->ps_cur_slice->u1_field_pic_flag;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded + 2;
ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded
- ps_dec->ps_cur_slice->u1_field_pic_flag;
}
/* close deblock thread if it is not closed yet*/
if(ps_dec->u4_num_cores == 3)
{
ih264d_signal_bs_deblk_thread(ps_dec);
}
{
/* In case the decoder is configured to run in low delay mode,
* then get display buffer and then format convert.
* Note in this mode, format conversion does not run paralelly in a thread and adds to the codec cycles
*/
if((IVD_DECODE_FRAME_OUT == ps_dec->e_frm_out_mode)
&& ps_dec->u1_init_dec_flag)
{
ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
if(0 == ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = 0;
ps_dec->u4_output_present = 1;
}
}
ih264d_fill_output_struct_from_context(ps_dec, ps_dec_op);
/* If Format conversion is not complete,
complete it here */
if(ps_dec->u4_output_present &&
(ps_dec->u4_fmt_conv_cur_row < ps_dec->s_disp_frame_info.u4_y_ht))
{
ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht
- ps_dec->u4_fmt_conv_cur_row;
ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op),
ps_dec->u4_fmt_conv_cur_row,
ps_dec->u4_fmt_conv_num_rows);
ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows;
}
ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op));
}
if(ps_dec->i4_decode_header == 1 && (ps_dec->i4_header_decoded & 1) == 1)
{
ps_dec_op->u4_progressive_frame_flag = 1;
if((NULL != ps_dec->ps_cur_sps) && (1 == (ps_dec->ps_cur_sps->u1_is_valid)))
{
if((0 == ps_dec->ps_sps->u1_frame_mbs_only_flag)
&& (0 == ps_dec->ps_sps->u1_mb_aff_flag))
ps_dec_op->u4_progressive_frame_flag = 0;
}
}
/*Data memory barrier instruction,so that yuv write by the library is complete*/
DATA_SYNC();
H264_DEC_DEBUG_PRINT("The num bytes consumed: %d\n",
ps_dec_op->u4_num_bytes_consumed);
return api_ret_value;
}
| 174,162 |
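A side note on the fix above: its first hunk zeroes the caller-supplied output structure while preserving the one field the caller filled in (u4_size), so error paths can never return stale dimensions. Below is a minimal standalone sketch of that pattern; decode_out and reset_output are made-up names, not the libavc types.
#include <cstring>
struct decode_out {
    unsigned u4_size;   // filled in by the caller
    unsigned width;
    unsigned height;
    // ... the real ivd_video_decode_op_t has many more fields ...
};
void reset_output(decode_out* out) {
    unsigned saved_size = out->u4_size;   // caller-provided, must survive the wipe
    std::memset(out, 0, sizeof(*out));
    out->u4_size = saved_size;
}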
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: base::string16 FormatBookmarkURLForDisplay(const GURL& url) {
return url_formatter::FormatUrl(
url, url_formatter::kFormatUrlOmitAll &
~url_formatter::kFormatUrlOmitUsernamePassword,
net::UnescapeRule::SPACES, nullptr, nullptr, nullptr);
}
Commit Message: Prevent interpreting userinfo as url scheme when editing bookmarks
Chrome's Edit Bookmark dialog formats urls for display such that a
url of http://javascript:[email protected] is later converted to a
javascript url scheme, allowing persistence of a script injection
attack within the user's bookmarks.
This fix prevents such misinterpretations by always showing the
scheme when a userinfo component is present within the url.
BUG=639126
Review-Url: https://codereview.chromium.org/2368593002
Cr-Commit-Position: refs/heads/master@{#422467}
CWE ID: CWE-79 | base::string16 FormatBookmarkURLForDisplay(const GURL& url) {
// and trailing slash, and unescape most characters. However, it's
url_formatter::FormatUrlTypes format_types =
url_formatter::kFormatUrlOmitAll &
~url_formatter::kFormatUrlOmitUsernamePassword;
// If username is present, we must not omit the scheme because FixupURL() will
// subsequently interpret the username as a scheme. crbug.com/639126
if (url.has_username())
format_types &= ~url_formatter::kFormatUrlOmitHTTP;
return url_formatter::FormatUrl(url, format_types, net::UnescapeRule::SPACES,
nullptr, nullptr, nullptr);
}
| 172,103 |
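To make the attack in the commit message above concrete: once the scheme is omitted for display, a later fix-up step that re-parses the edited text can take the userinfo component for a scheme. The sketch below is standalone illustrative C++, not Chromium code; FormatForDisplay and GuessScheme are hypothetical stand-ins for the real display formatter and FixupURL().
#include <iostream>
#include <string>
// Drop a leading "http://" for display, roughly what the vulnerable path did.
std::string FormatForDisplay(const std::string& url) {
    const std::string prefix = "http://";
    if (url.compare(0, prefix.size(), prefix) == 0)
        return url.substr(prefix.size());
    return url;
}
// Naive fix-up of user-edited text: everything before the first ':' is a scheme.
std::string GuessScheme(const std::string& text) {
    std::string::size_type pos = text.find(':');
    return pos == std::string::npos ? "http" : text.substr(0, pos);
}
int main() {
    std::string stored = "http://javascript:alert(1)@example.com/";
    std::string shown = FormatForDisplay(stored);  // "javascript:alert(1)@example.com/"
    std::cout << GuessScheme(shown) << "\n";       // prints "javascript"
}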
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int get_siz(Jpeg2000DecoderContext *s)
{
int i;
int ncomponents;
uint32_t log2_chroma_wh = 0;
const enum AVPixelFormat *possible_fmts = NULL;
int possible_fmts_nb = 0;
if (bytestream2_get_bytes_left(&s->g) < 36)
return AVERROR_INVALIDDATA;
s->avctx->profile = bytestream2_get_be16u(&s->g); // Rsiz
s->width = bytestream2_get_be32u(&s->g); // Width
s->height = bytestream2_get_be32u(&s->g); // Height
s->image_offset_x = bytestream2_get_be32u(&s->g); // X0Siz
s->image_offset_y = bytestream2_get_be32u(&s->g); // Y0Siz
s->tile_width = bytestream2_get_be32u(&s->g); // XTSiz
s->tile_height = bytestream2_get_be32u(&s->g); // YTSiz
s->tile_offset_x = bytestream2_get_be32u(&s->g); // XT0Siz
s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz
ncomponents = bytestream2_get_be16u(&s->g); // CSiz
if (ncomponents <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid number of components: %d\n",
s->ncomponents);
return AVERROR_INVALIDDATA;
}
if (ncomponents > 4) {
avpriv_request_sample(s->avctx, "Support for %d components",
s->ncomponents);
return AVERROR_PATCHWELCOME;
}
s->ncomponents = ncomponents;
if (s->tile_width <= 0 || s->tile_height <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid tile dimension %dx%d.\n",
s->tile_width, s->tile_height);
return AVERROR_INVALIDDATA;
}
if (bytestream2_get_bytes_left(&s->g) < 3 * s->ncomponents)
return AVERROR_INVALIDDATA;
for (i = 0; i < s->ncomponents; i++) { // Ssiz_i XRsiz_i, YRsiz_i
uint8_t x = bytestream2_get_byteu(&s->g);
s->cbps[i] = (x & 0x7f) + 1;
s->precision = FFMAX(s->cbps[i], s->precision);
s->sgnd[i] = !!(x & 0x80);
s->cdx[i] = bytestream2_get_byteu(&s->g);
s->cdy[i] = bytestream2_get_byteu(&s->g);
if (!s->cdx[i] || !s->cdy[i]) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid sample seperation\n");
return AVERROR_INVALIDDATA;
}
log2_chroma_wh |= s->cdy[i] >> 1 << i * 4 | s->cdx[i] >> 1 << i * 4 + 2;
}
s->numXtiles = ff_jpeg2000_ceildiv(s->width - s->tile_offset_x, s->tile_width);
s->numYtiles = ff_jpeg2000_ceildiv(s->height - s->tile_offset_y, s->tile_height);
if (s->numXtiles * (uint64_t)s->numYtiles > INT_MAX/sizeof(*s->tile)) {
s->numXtiles = s->numYtiles = 0;
return AVERROR(EINVAL);
}
s->tile = av_mallocz_array(s->numXtiles * s->numYtiles, sizeof(*s->tile));
if (!s->tile) {
s->numXtiles = s->numYtiles = 0;
return AVERROR(ENOMEM);
}
for (i = 0; i < s->numXtiles * s->numYtiles; i++) {
Jpeg2000Tile *tile = s->tile + i;
tile->comp = av_mallocz(s->ncomponents * sizeof(*tile->comp));
if (!tile->comp)
return AVERROR(ENOMEM);
}
/* compute image size with reduction factor */
s->avctx->width = ff_jpeg2000_ceildivpow2(s->width - s->image_offset_x,
s->reduction_factor);
s->avctx->height = ff_jpeg2000_ceildivpow2(s->height - s->image_offset_y,
s->reduction_factor);
if (s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_2K ||
s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_4K) {
possible_fmts = xyz_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(xyz_pix_fmts);
} else {
switch (s->colour_space) {
case 16:
possible_fmts = rgb_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(rgb_pix_fmts);
break;
case 17:
possible_fmts = gray_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(gray_pix_fmts);
break;
case 18:
possible_fmts = yuv_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(yuv_pix_fmts);
break;
default:
possible_fmts = all_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(all_pix_fmts);
break;
}
}
for (i = 0; i < possible_fmts_nb; ++i) {
if (pix_fmt_match(possible_fmts[i], ncomponents, s->precision, log2_chroma_wh, s->pal8)) {
s->avctx->pix_fmt = possible_fmts[i];
break;
}
}
if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
av_log(s->avctx, AV_LOG_ERROR,
"Unknown pix_fmt, profile: %d, colour_space: %d, "
"components: %d, precision: %d, "
"cdx[1]: %d, cdy[1]: %d, cdx[2]: %d, cdy[2]: %d\n",
s->avctx->profile, s->colour_space, ncomponents, s->precision,
ncomponents > 2 ? s->cdx[1] : 0,
ncomponents > 2 ? s->cdy[1] : 0,
ncomponents > 2 ? s->cdx[2] : 0,
ncomponents > 2 ? s->cdy[2] : 0);
}
return 0;
}
Commit Message: avcodec/jpeg2000dec: Check cdx/y values more carefully
Some invalid values were not handled correctly in the later pixel
format matching code.
Fixes out of array accesses
Fixes Ticket2848
Found-by: Piotr Bandurski <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
CWE ID: CWE-119 | static int get_siz(Jpeg2000DecoderContext *s)
{
int i;
int ncomponents;
uint32_t log2_chroma_wh = 0;
const enum AVPixelFormat *possible_fmts = NULL;
int possible_fmts_nb = 0;
if (bytestream2_get_bytes_left(&s->g) < 36)
return AVERROR_INVALIDDATA;
s->avctx->profile = bytestream2_get_be16u(&s->g); // Rsiz
s->width = bytestream2_get_be32u(&s->g); // Width
s->height = bytestream2_get_be32u(&s->g); // Height
s->image_offset_x = bytestream2_get_be32u(&s->g); // X0Siz
s->image_offset_y = bytestream2_get_be32u(&s->g); // Y0Siz
s->tile_width = bytestream2_get_be32u(&s->g); // XTSiz
s->tile_height = bytestream2_get_be32u(&s->g); // YTSiz
s->tile_offset_x = bytestream2_get_be32u(&s->g); // XT0Siz
s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz
ncomponents = bytestream2_get_be16u(&s->g); // CSiz
if (ncomponents <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid number of components: %d\n",
s->ncomponents);
return AVERROR_INVALIDDATA;
}
if (ncomponents > 4) {
avpriv_request_sample(s->avctx, "Support for %d components",
s->ncomponents);
return AVERROR_PATCHWELCOME;
}
s->ncomponents = ncomponents;
if (s->tile_width <= 0 || s->tile_height <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid tile dimension %dx%d.\n",
s->tile_width, s->tile_height);
return AVERROR_INVALIDDATA;
}
if (bytestream2_get_bytes_left(&s->g) < 3 * s->ncomponents)
return AVERROR_INVALIDDATA;
for (i = 0; i < s->ncomponents; i++) { // Ssiz_i XRsiz_i, YRsiz_i
uint8_t x = bytestream2_get_byteu(&s->g);
s->cbps[i] = (x & 0x7f) + 1;
s->precision = FFMAX(s->cbps[i], s->precision);
s->sgnd[i] = !!(x & 0x80);
s->cdx[i] = bytestream2_get_byteu(&s->g);
s->cdy[i] = bytestream2_get_byteu(&s->g);
if ( !s->cdx[i] || s->cdx[i] == 3 || s->cdx[i] > 4
|| !s->cdy[i] || s->cdy[i] == 3 || s->cdy[i] > 4) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid sample seperation\n");
return AVERROR_INVALIDDATA;
}
log2_chroma_wh |= s->cdy[i] >> 1 << i * 4 | s->cdx[i] >> 1 << i * 4 + 2;
}
s->numXtiles = ff_jpeg2000_ceildiv(s->width - s->tile_offset_x, s->tile_width);
s->numYtiles = ff_jpeg2000_ceildiv(s->height - s->tile_offset_y, s->tile_height);
if (s->numXtiles * (uint64_t)s->numYtiles > INT_MAX/sizeof(*s->tile)) {
s->numXtiles = s->numYtiles = 0;
return AVERROR(EINVAL);
}
s->tile = av_mallocz_array(s->numXtiles * s->numYtiles, sizeof(*s->tile));
if (!s->tile) {
s->numXtiles = s->numYtiles = 0;
return AVERROR(ENOMEM);
}
for (i = 0; i < s->numXtiles * s->numYtiles; i++) {
Jpeg2000Tile *tile = s->tile + i;
tile->comp = av_mallocz(s->ncomponents * sizeof(*tile->comp));
if (!tile->comp)
return AVERROR(ENOMEM);
}
/* compute image size with reduction factor */
s->avctx->width = ff_jpeg2000_ceildivpow2(s->width - s->image_offset_x,
s->reduction_factor);
s->avctx->height = ff_jpeg2000_ceildivpow2(s->height - s->image_offset_y,
s->reduction_factor);
if (s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_2K ||
s->avctx->profile == FF_PROFILE_JPEG2000_DCINEMA_4K) {
possible_fmts = xyz_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(xyz_pix_fmts);
} else {
switch (s->colour_space) {
case 16:
possible_fmts = rgb_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(rgb_pix_fmts);
break;
case 17:
possible_fmts = gray_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(gray_pix_fmts);
break;
case 18:
possible_fmts = yuv_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(yuv_pix_fmts);
break;
default:
possible_fmts = all_pix_fmts;
possible_fmts_nb = FF_ARRAY_ELEMS(all_pix_fmts);
break;
}
}
for (i = 0; i < possible_fmts_nb; ++i) {
if (pix_fmt_match(possible_fmts[i], ncomponents, s->precision, log2_chroma_wh, s->pal8)) {
s->avctx->pix_fmt = possible_fmts[i];
break;
}
}
if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
av_log(s->avctx, AV_LOG_ERROR,
"Unknown pix_fmt, profile: %d, colour_space: %d, "
"components: %d, precision: %d, "
"cdx[1]: %d, cdy[1]: %d, cdx[2]: %d, cdy[2]: %d\n",
s->avctx->profile, s->colour_space, ncomponents, s->precision,
ncomponents > 2 ? s->cdx[1] : 0,
ncomponents > 2 ? s->cdy[1] : 0,
ncomponents > 2 ? s->cdx[2] : 0,
ncomponents > 2 ? s->cdy[2] : 0);
}
return 0;
}
| 165,923 |
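A quick illustration of why the tightened cdx/cdy check above matters: the pixel-format matcher only ever sees the value shifted right by one, so a component separation of 3 packs identically to 2 (and 5 aliases 4), while the rest of the decoder assumes the factor is exactly 1, 2 or 4. The loop below is a standalone sketch, not FFmpeg code.
#include <cstdio>
int main() {
    for (unsigned cdx = 1; cdx <= 5; ++cdx)
        std::printf("cdx=%u -> cdx>>1=%u\n", cdx, cdx >> 1);
    // 1->0, 2->1, 3->1, 4->2, 5->2: a separation of 3 is indistinguishable
    // from 2 once packed, and 5 aliases 4.
    return 0;
}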
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: destroy_one_secret (gpointer data)
{
char *secret = (char *) data;
/* Don't leave the secret lying around in memory */
g_message ("%s: destroying %s", __func__, secret);
memset (secret, 0, strlen (secret));
g_free (secret);
}
Commit Message:
CWE ID: CWE-200 | destroy_one_secret (gpointer data)
{
char *secret = (char *) data;
/* Don't leave the secret lying around in memory */
memset (secret, 0, strlen (secret));
g_free (secret);
}
| 164,689 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
{
BN_ULONG t1,t2;
BN_ULONG c1,c2,c3;
c1=0;
c2=0;
c3=0;
mul_add_c(a[0],b[0],c1,c2,c3);
r[0]=c1;
c1=0;
mul_add_c(a[0],b[1],c2,c3,c1);
mul_add_c(a[1],b[0],c2,c3,c1);
r[1]=c2;
c2=0;
mul_add_c(a[2],b[0],c3,c1,c2);
mul_add_c(a[1],b[1],c3,c1,c2);
mul_add_c(a[0],b[2],c3,c1,c2);
r[2]=c3;
c3=0;
mul_add_c(a[0],b[3],c1,c2,c3);
mul_add_c(a[1],b[2],c1,c2,c3);
mul_add_c(a[2],b[1],c1,c2,c3);
mul_add_c(a[3],b[0],c1,c2,c3);
r[3]=c1;
c1=0;
mul_add_c(a[4],b[0],c2,c3,c1);
mul_add_c(a[3],b[1],c2,c3,c1);
mul_add_c(a[2],b[2],c2,c3,c1);
mul_add_c(a[1],b[3],c2,c3,c1);
mul_add_c(a[0],b[4],c2,c3,c1);
r[4]=c2;
c2=0;
mul_add_c(a[0],b[5],c3,c1,c2);
mul_add_c(a[1],b[4],c3,c1,c2);
mul_add_c(a[2],b[3],c3,c1,c2);
mul_add_c(a[3],b[2],c3,c1,c2);
mul_add_c(a[4],b[1],c3,c1,c2);
mul_add_c(a[5],b[0],c3,c1,c2);
r[5]=c3;
c3=0;
mul_add_c(a[6],b[0],c1,c2,c3);
mul_add_c(a[5],b[1],c1,c2,c3);
mul_add_c(a[4],b[2],c1,c2,c3);
mul_add_c(a[3],b[3],c1,c2,c3);
mul_add_c(a[2],b[4],c1,c2,c3);
mul_add_c(a[1],b[5],c1,c2,c3);
mul_add_c(a[0],b[6],c1,c2,c3);
r[6]=c1;
c1=0;
mul_add_c(a[0],b[7],c2,c3,c1);
mul_add_c(a[1],b[6],c2,c3,c1);
mul_add_c(a[2],b[5],c2,c3,c1);
mul_add_c(a[3],b[4],c2,c3,c1);
mul_add_c(a[4],b[3],c2,c3,c1);
mul_add_c(a[5],b[2],c2,c3,c1);
mul_add_c(a[6],b[1],c2,c3,c1);
mul_add_c(a[7],b[0],c2,c3,c1);
r[7]=c2;
c2=0;
mul_add_c(a[7],b[1],c3,c1,c2);
mul_add_c(a[6],b[2],c3,c1,c2);
mul_add_c(a[5],b[3],c3,c1,c2);
mul_add_c(a[4],b[4],c3,c1,c2);
mul_add_c(a[3],b[5],c3,c1,c2);
mul_add_c(a[2],b[6],c3,c1,c2);
mul_add_c(a[1],b[7],c3,c1,c2);
r[8]=c3;
c3=0;
mul_add_c(a[2],b[7],c1,c2,c3);
mul_add_c(a[3],b[6],c1,c2,c3);
mul_add_c(a[4],b[5],c1,c2,c3);
mul_add_c(a[5],b[4],c1,c2,c3);
mul_add_c(a[6],b[3],c1,c2,c3);
mul_add_c(a[7],b[2],c1,c2,c3);
r[9]=c1;
c1=0;
mul_add_c(a[7],b[3],c2,c3,c1);
mul_add_c(a[6],b[4],c2,c3,c1);
mul_add_c(a[5],b[5],c2,c3,c1);
mul_add_c(a[4],b[6],c2,c3,c1);
mul_add_c(a[3],b[7],c2,c3,c1);
r[10]=c2;
c2=0;
mul_add_c(a[4],b[7],c3,c1,c2);
mul_add_c(a[5],b[6],c3,c1,c2);
mul_add_c(a[6],b[5],c3,c1,c2);
mul_add_c(a[7],b[4],c3,c1,c2);
r[11]=c3;
c3=0;
mul_add_c(a[7],b[5],c1,c2,c3);
mul_add_c(a[6],b[6],c1,c2,c3);
mul_add_c(a[5],b[7],c1,c2,c3);
r[12]=c1;
c1=0;
mul_add_c(a[6],b[7],c2,c3,c1);
mul_add_c(a[7],b[6],c2,c3,c1);
r[13]=c2;
c2=0;
mul_add_c(a[7],b[7],c3,c1,c2);
r[14]=c3;
r[15]=c1;
}
Commit Message: Fix for CVE-2014-3570 (with minor bn_asm.c revamp).
Reviewed-by: Emilia Kasper <[email protected]>
CWE ID: CWE-310 | void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
{
BN_ULONG c1,c2,c3;
c1=0;
c2=0;
c3=0;
mul_add_c(a[0],b[0],c1,c2,c3);
r[0]=c1;
c1=0;
mul_add_c(a[0],b[1],c2,c3,c1);
mul_add_c(a[1],b[0],c2,c3,c1);
r[1]=c2;
c2=0;
mul_add_c(a[2],b[0],c3,c1,c2);
mul_add_c(a[1],b[1],c3,c1,c2);
mul_add_c(a[0],b[2],c3,c1,c2);
r[2]=c3;
c3=0;
mul_add_c(a[0],b[3],c1,c2,c3);
mul_add_c(a[1],b[2],c1,c2,c3);
mul_add_c(a[2],b[1],c1,c2,c3);
mul_add_c(a[3],b[0],c1,c2,c3);
r[3]=c1;
c1=0;
mul_add_c(a[4],b[0],c2,c3,c1);
mul_add_c(a[3],b[1],c2,c3,c1);
mul_add_c(a[2],b[2],c2,c3,c1);
mul_add_c(a[1],b[3],c2,c3,c1);
mul_add_c(a[0],b[4],c2,c3,c1);
r[4]=c2;
c2=0;
mul_add_c(a[0],b[5],c3,c1,c2);
mul_add_c(a[1],b[4],c3,c1,c2);
mul_add_c(a[2],b[3],c3,c1,c2);
mul_add_c(a[3],b[2],c3,c1,c2);
mul_add_c(a[4],b[1],c3,c1,c2);
mul_add_c(a[5],b[0],c3,c1,c2);
r[5]=c3;
c3=0;
mul_add_c(a[6],b[0],c1,c2,c3);
mul_add_c(a[5],b[1],c1,c2,c3);
mul_add_c(a[4],b[2],c1,c2,c3);
mul_add_c(a[3],b[3],c1,c2,c3);
mul_add_c(a[2],b[4],c1,c2,c3);
mul_add_c(a[1],b[5],c1,c2,c3);
mul_add_c(a[0],b[6],c1,c2,c3);
r[6]=c1;
c1=0;
mul_add_c(a[0],b[7],c2,c3,c1);
mul_add_c(a[1],b[6],c2,c3,c1);
mul_add_c(a[2],b[5],c2,c3,c1);
mul_add_c(a[3],b[4],c2,c3,c1);
mul_add_c(a[4],b[3],c2,c3,c1);
mul_add_c(a[5],b[2],c2,c3,c1);
mul_add_c(a[6],b[1],c2,c3,c1);
mul_add_c(a[7],b[0],c2,c3,c1);
r[7]=c2;
c2=0;
mul_add_c(a[7],b[1],c3,c1,c2);
mul_add_c(a[6],b[2],c3,c1,c2);
mul_add_c(a[5],b[3],c3,c1,c2);
mul_add_c(a[4],b[4],c3,c1,c2);
mul_add_c(a[3],b[5],c3,c1,c2);
mul_add_c(a[2],b[6],c3,c1,c2);
mul_add_c(a[1],b[7],c3,c1,c2);
r[8]=c3;
c3=0;
mul_add_c(a[2],b[7],c1,c2,c3);
mul_add_c(a[3],b[6],c1,c2,c3);
mul_add_c(a[4],b[5],c1,c2,c3);
mul_add_c(a[5],b[4],c1,c2,c3);
mul_add_c(a[6],b[3],c1,c2,c3);
mul_add_c(a[7],b[2],c1,c2,c3);
r[9]=c1;
c1=0;
mul_add_c(a[7],b[3],c2,c3,c1);
mul_add_c(a[6],b[4],c2,c3,c1);
mul_add_c(a[5],b[5],c2,c3,c1);
mul_add_c(a[4],b[6],c2,c3,c1);
mul_add_c(a[3],b[7],c2,c3,c1);
r[10]=c2;
c2=0;
mul_add_c(a[4],b[7],c3,c1,c2);
mul_add_c(a[5],b[6],c3,c1,c2);
mul_add_c(a[6],b[5],c3,c1,c2);
mul_add_c(a[7],b[4],c3,c1,c2);
r[11]=c3;
c3=0;
mul_add_c(a[7],b[5],c1,c2,c3);
mul_add_c(a[6],b[6],c1,c2,c3);
mul_add_c(a[5],b[7],c1,c2,c3);
r[12]=c1;
c1=0;
mul_add_c(a[6],b[7],c2,c3,c1);
mul_add_c(a[7],b[6],c2,c3,c1);
r[13]=c2;
c2=0;
mul_add_c(a[7],b[7],c3,c1,c2);
r[14]=c3;
r[15]=c1;
}
| 166,829 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: png_check_chunk_length(png_const_structrp png_ptr, const png_uint_32 length)
{
png_alloc_size_t limit = PNG_UINT_31_MAX;
# ifdef PNG_SET_USER_LIMITS_SUPPORTED
if (png_ptr->user_chunk_malloc_max > 0 &&
png_ptr->user_chunk_malloc_max < limit)
limit = png_ptr->user_chunk_malloc_max;
# elif PNG_USER_CHUNK_MALLOC_MAX > 0
if (PNG_USER_CHUNK_MALLOC_MAX < limit)
limit = PNG_USER_CHUNK_MALLOC_MAX;
# endif
if (png_ptr->chunk_name == png_IDAT)
{
png_alloc_size_t idat_limit = PNG_UINT_31_MAX;
size_t row_factor =
(png_ptr->width * png_ptr->channels * (png_ptr->bit_depth > 8? 2: 1)
+ 1 + (png_ptr->interlaced? 6: 0));
if (png_ptr->height > PNG_UINT_32_MAX/row_factor)
idat_limit=PNG_UINT_31_MAX;
else
idat_limit = png_ptr->height * row_factor;
row_factor = row_factor > 32566? 32566 : row_factor;
idat_limit += 6 + 5*(idat_limit/row_factor+1); /* zlib+deflate overhead */
idat_limit=idat_limit < PNG_UINT_31_MAX? idat_limit : PNG_UINT_31_MAX;
limit = limit < idat_limit? idat_limit : limit;
}
if (length > limit)
{
png_debug2(0," length = %lu, limit = %lu",
(unsigned long)length,(unsigned long)limit);
png_chunk_error(png_ptr, "chunk data is too large");
}
}
Commit Message: [libpng16] Fix the calculation of row_factor in png_check_chunk_length
(Bug report by Thuan Pham, SourceForge issue #278)
CWE ID: CWE-190 | png_check_chunk_length(png_const_structrp png_ptr, const png_uint_32 length)
{
png_alloc_size_t limit = PNG_UINT_31_MAX;
# ifdef PNG_SET_USER_LIMITS_SUPPORTED
if (png_ptr->user_chunk_malloc_max > 0 &&
png_ptr->user_chunk_malloc_max < limit)
limit = png_ptr->user_chunk_malloc_max;
# elif PNG_USER_CHUNK_MALLOC_MAX > 0
if (PNG_USER_CHUNK_MALLOC_MAX < limit)
limit = PNG_USER_CHUNK_MALLOC_MAX;
# endif
if (png_ptr->chunk_name == png_IDAT)
{
png_alloc_size_t idat_limit = PNG_UINT_31_MAX;
size_t row_factor =
(size_t)png_ptr->width
* (size_t)png_ptr->channels
* (png_ptr->bit_depth > 8? 2: 1)
+ 1
+ (png_ptr->interlaced? 6: 0);
if (png_ptr->height > PNG_UINT_32_MAX/row_factor)
idat_limit = PNG_UINT_31_MAX;
else
idat_limit = png_ptr->height * row_factor;
row_factor = row_factor > 32566? 32566 : row_factor;
idat_limit += 6 + 5*(idat_limit/row_factor+1); /* zlib+deflate overhead */
idat_limit=idat_limit < PNG_UINT_31_MAX? idat_limit : PNG_UINT_31_MAX;
limit = limit < idat_limit? idat_limit : limit;
}
if (length > limit)
{
png_debug2(0," length = %lu, limit = %lu",
(unsigned long)length,(unsigned long)limit);
png_chunk_error(png_ptr, "chunk data is too large");
}
}
| 169,151 |
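A worked example of the CWE-190 problem fixed above: in the vulnerable version the per-row size is multiplied out in 32-bit arithmetic and only then stored into the wider row_factor, so a crafted width can wrap the product and shrink the computed IDAT limit. The numbers below are made up for illustration; this is not libpng code.
#include <cstdint>
#include <cstdio>
int main() {
    uint32_t width = 0x40000000u;  // well within PNG's 31-bit width limit
    uint32_t channels = 4;
    uint64_t wrapped = width * channels * 2;            // multiplied in 32 bits: wraps to 0
    uint64_t widened = (uint64_t)width * channels * 2;  // widened first: 8589934592
    std::printf("wrapped=%llu widened=%llu\n",
                (unsigned long long)wrapped, (unsigned long long)widened);
    return 0;
}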
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
struct extent_status es;
int retval;
int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
struct ext4_map_blocks orig_map;
memcpy(&orig_map, map, sizeof(*map));
#endif
map->m_flags = 0;
ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
"logical block %lu\n", inode->i_ino, flags, map->m_len,
(unsigned long) map->m_lblk);
/*
* ext4_map_blocks returns an int, and m_len is an unsigned int
*/
if (unlikely(map->m_len > INT_MAX))
map->m_len = INT_MAX;
/* We can handle the block number less than EXT_MAX_BLOCKS */
if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
return -EFSCORRUPTED;
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
map->m_pblk = ext4_es_pblock(&es) +
map->m_lblk - es.es_lblk;
map->m_flags |= ext4_es_is_written(&es) ?
EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
map->m_pblk = 0;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
retval = 0;
} else {
BUG_ON(1);
}
#ifdef ES_AGGRESSIVE_TEST
ext4_map_blocks_es_recheck(handle, inode, map,
&orig_map, flags);
#endif
goto found;
}
/*
* Try to see if we can get the block without requesting a new
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
}
if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk,
map->m_len, map->m_pblk, status);
if (ret < 0)
retval = ret;
}
up_read((&EXT4_I(inode)->i_data_sem));
found:
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
/* If it is only a block(s) look up */
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
return retval;
/*
* Returns if the blocks have already allocated
*
* Note that if blocks have been preallocated
* ext4_ext_get_block() returns the create = 0
* with buffer head unmapped.
*/
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
/*
* If we need to convert extent to unwritten
* we continue and do the actual work in
* ext4_ext_map_blocks()
*/
if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
return retval;
/*
* Here we clear m_flags because after allocating an new extent,
* it will be set again.
*/
map->m_flags &= ~EXT4_MAP_FLAGS;
/*
* New blocks allocate and/or writing to unwritten extent
* will possibly result in updating i_data, so we take
* the write lock of i_data_sem, and call get_block()
* with create == 1 flag.
*/
down_write(&EXT4_I(inode)->i_data_sem);
/*
* We need to check for EXT4 here because migrate
* could have changed the inode type in between
*/
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags);
if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
/*
* We allocated new blocks which will result in
* i_data's format changing. Force the migrate
* to fail by clearing migrate flags
*/
ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
}
/*
* Update reserved blocks/metadata blocks after successful
* block allocation which had been deferred till now. We don't
* support fallocate for non extent files. So we can update
* reserve space here.
*/
if ((retval > 0) &&
(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
ext4_da_update_reserve_space(inode, retval, 1);
}
if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
/*
* We have to zeroout blocks before inserting them into extent
* status tree. Otherwise someone could look them up there and
* use them before they are really zeroed.
*/
if (flags & EXT4_GET_BLOCKS_ZERO &&
map->m_flags & EXT4_MAP_MAPPED &&
map->m_flags & EXT4_MAP_NEW) {
ret = ext4_issue_zeroout(inode, map->m_lblk,
map->m_pblk, map->m_len);
if (ret) {
retval = ret;
goto out_sem;
}
}
/*
* If the extent has been zeroed out, we don't need to update
* extent status tree.
*/
if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es))
goto out_sem;
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
if (ret < 0) {
retval = ret;
goto out_sem;
}
}
out_sem:
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
return retval;
}
Commit Message: ext4: fix data exposure after a crash
Huang has reported that in his powerfail testing he is seeing stale
block contents in some of recently allocated blocks although he mounts
ext4 in data=ordered mode. After some investigation I have found out
that indeed when delayed allocation is used, we don't add inode to
transaction's list of inodes needing flushing before commit. Originally
we were doing that but commit f3b59291a69d removed the logic with a
flawed argument that it is not needed.
The problem is that although for delayed allocated blocks we write their
contents immediately after allocating them, there is no guarantee that
the IO scheduler or device doesn't reorder things and thus transaction
allocating blocks and attaching them to inode can reach stable storage
before actual block contents. Actually whenever we attach freshly
allocated blocks to inode using a written extent, we should add inode to
transaction's ordered inode list to make sure we properly wait for block
contents to be written before committing the transaction. So that is
what we do in this patch. This also handles other cases where stale data
exposure was possible - like filling hole via mmap in
data=ordered,nodelalloc mode.
The only exception to the above rule are extending direct IO writes where
blkdev_direct_IO() waits for IO to complete before increasing i_size and
thus stale data exposure is not possible. For now we don't complicate
the code with optimizing this special case since the overhead is pretty
low. In case this is observed to be a performance problem we can always
handle it using a special flag to ext4_map_blocks().
CC: [email protected]
Fixes: f3b59291a69d0b734be1fc8be489fef2dd846d3d
Reported-by: "HUANG Weller (CM/ESW12-CN)" <[email protected]>
Tested-by: "HUANG Weller (CM/ESW12-CN)" <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
CWE ID: CWE-200 | int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
struct extent_status es;
int retval;
int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
struct ext4_map_blocks orig_map;
memcpy(&orig_map, map, sizeof(*map));
#endif
map->m_flags = 0;
ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
"logical block %lu\n", inode->i_ino, flags, map->m_len,
(unsigned long) map->m_lblk);
/*
* ext4_map_blocks returns an int, and m_len is an unsigned int
*/
if (unlikely(map->m_len > INT_MAX))
map->m_len = INT_MAX;
/* We can handle the block number less than EXT_MAX_BLOCKS */
if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
return -EFSCORRUPTED;
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
map->m_pblk = ext4_es_pblock(&es) +
map->m_lblk - es.es_lblk;
map->m_flags |= ext4_es_is_written(&es) ?
EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
map->m_pblk = 0;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
retval = 0;
} else {
BUG_ON(1);
}
#ifdef ES_AGGRESSIVE_TEST
ext4_map_blocks_es_recheck(handle, inode, map,
&orig_map, flags);
#endif
goto found;
}
/*
* Try to see if we can get the block without requesting a new
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
}
if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk,
map->m_len, map->m_pblk, status);
if (ret < 0)
retval = ret;
}
up_read((&EXT4_I(inode)->i_data_sem));
found:
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
/* If it is only a block(s) look up */
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
return retval;
/*
* Returns if the blocks have already allocated
*
* Note that if blocks have been preallocated
* ext4_ext_get_block() returns the create = 0
* with buffer head unmapped.
*/
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
/*
* If we need to convert extent to unwritten
* we continue and do the actual work in
* ext4_ext_map_blocks()
*/
if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
return retval;
/*
* Here we clear m_flags because after allocating an new extent,
* it will be set again.
*/
map->m_flags &= ~EXT4_MAP_FLAGS;
/*
* New blocks allocate and/or writing to unwritten extent
* will possibly result in updating i_data, so we take
* the write lock of i_data_sem, and call get_block()
* with create == 1 flag.
*/
down_write(&EXT4_I(inode)->i_data_sem);
/*
* We need to check for EXT4 here because migrate
* could have changed the inode type in between
*/
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags);
if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
/*
* We allocated new blocks which will result in
* i_data's format changing. Force the migrate
* to fail by clearing migrate flags
*/
ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
}
/*
* Update reserved blocks/metadata blocks after successful
* block allocation which had been deferred till now. We don't
* support fallocate for non extent files. So we can update
* reserve space here.
*/
if ((retval > 0) &&
(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
ext4_da_update_reserve_space(inode, retval, 1);
}
if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
/*
* We have to zeroout blocks before inserting them into extent
* status tree. Otherwise someone could look them up there and
* use them before they are really zeroed.
*/
if (flags & EXT4_GET_BLOCKS_ZERO &&
map->m_flags & EXT4_MAP_MAPPED &&
map->m_flags & EXT4_MAP_NEW) {
ret = ext4_issue_zeroout(inode, map->m_lblk,
map->m_pblk, map->m_len);
if (ret) {
retval = ret;
goto out_sem;
}
}
/*
* If the extent has been zeroed out, we don't need to update
* extent status tree.
*/
if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es))
goto out_sem;
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
if (ret < 0) {
retval = ret;
goto out_sem;
}
}
out_sem:
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
/*
* Inodes with freshly allocated blocks where contents will be
* visible after transaction commit must be on transaction's
* ordered data list.
*/
if (map->m_flags & EXT4_MAP_NEW &&
!(map->m_flags & EXT4_MAP_UNWRITTEN) &&
!(flags & EXT4_GET_BLOCKS_ZERO) &&
!IS_NOQUOTA(inode) &&
ext4_should_order_data(inode)) {
ret = ext4_jbd2_file_inode(handle, inode);
if (ret)
return ret;
}
}
return retval;
}
| 168,270 |
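The ordering rule argued in the commit message above — metadata that makes new blocks visible must not become durable before the block contents themselves — is the same rule behind the familiar userspace write-fsync-rename pattern. The sketch below is only that analogy (POSIX calls, a made-up publish() helper), not ext4 or jbd2 code.
#include <cstdio>
#include <cstring>
#include <fcntl.h>
#include <unistd.h>
bool publish(const char* tmp_path, const char* final_path, const char* data) {
    int fd = open(tmp_path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
    if (fd < 0) return false;
    bool ok = write(fd, data, strlen(data)) == (ssize_t)strlen(data)
              && fsync(fd) == 0;   // the data reaches stable storage first...
    close(fd);
    if (!ok) return false;
    // A complete version would also fsync the containing directory after rename.
    return rename(tmp_path, final_path) == 0;   // ...only now is it made visible
}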
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
{
const void *buf = intf->altsetting->extra;
size_t buflen = intf->altsetting->extralen;
struct usb_cdc_union_desc *union_desc;
if (!buf) {
dev_err(&intf->dev, "Missing descriptor data\n");
return NULL;
}
if (!buflen) {
dev_err(&intf->dev, "Zero length descriptor\n");
return NULL;
}
while (buflen > 0) {
union_desc = (struct usb_cdc_union_desc *)buf;
if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
dev_dbg(&intf->dev, "Found union header\n");
return union_desc;
}
buflen -= union_desc->bLength;
buf += union_desc->bLength;
}
dev_err(&intf->dev, "Missing CDC union descriptor\n");
return NULL;
}
Commit Message: Input: ims-pcu - check if CDC union descriptor is sane
Before trying to use the CDC union descriptor, try to validate whether it
is sane by checking that intf->altsetting->extra is big enough and that the
descriptor bLength is not too big and not too small.
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Dmitry Torokhov <[email protected]>
CWE ID: CWE-125 | ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
{
const void *buf = intf->altsetting->extra;
size_t buflen = intf->altsetting->extralen;
struct usb_cdc_union_desc *union_desc;
if (!buf) {
dev_err(&intf->dev, "Missing descriptor data\n");
return NULL;
}
if (!buflen) {
dev_err(&intf->dev, "Zero length descriptor\n");
return NULL;
}
while (buflen >= sizeof(*union_desc)) {
union_desc = (struct usb_cdc_union_desc *)buf;
if (union_desc->bLength > buflen) {
dev_err(&intf->dev, "Too large descriptor\n");
return NULL;
}
if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
dev_dbg(&intf->dev, "Found union header\n");
if (union_desc->bLength >= sizeof(*union_desc))
return union_desc;
dev_err(&intf->dev,
"Union descriptor to short (%d vs %zd\n)",
union_desc->bLength, sizeof(*union_desc));
return NULL;
}
buflen -= union_desc->bLength;
buf += union_desc->bLength;
}
dev_err(&intf->dev, "Missing CDC union descriptor\n");
return NULL;
}
| 167,672 |
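The shape of the fix above — walking a buffer of length-prefixed descriptors — recurs across USB drivers, so here is a minimal standalone sketch of the three checks that close the CWE-125 hole: enough bytes left for a header, a declared length that fits in what remains, and a declared length large enough for the structure actually read. desc_header and find_descriptor are illustrative names, not the kernel's.
#include <cstddef>
#include <cstdint>
struct desc_header {        // illustrative two-byte descriptor header
    uint8_t bLength;
    uint8_t bDescriptorType;
};
const desc_header* find_descriptor(const uint8_t* buf, size_t buflen,
                                   uint8_t wanted_type, size_t wanted_size) {
    while (buflen >= sizeof(desc_header)) {
        const desc_header* d = reinterpret_cast<const desc_header*>(buf);
        if (d->bLength > buflen || d->bLength < sizeof(desc_header))
            return nullptr;        // malformed: would step out of bounds or loop forever
        if (d->bDescriptorType == wanted_type)
            return d->bLength >= wanted_size ? d : nullptr;   // long enough to use safely?
        buflen -= d->bLength;
        buf += d->bLength;
    }
    return nullptr;
}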
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int WebGraphicsContext3DCommandBufferImpl::GetGPUProcessID() {
return host_ ? host_->gpu_process_id() : 0;
}
Commit Message: Convert plugin and GPU process to brokered handle duplication.
BUG=119250
Review URL: https://chromiumcodereview.appspot.com/9958034
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | int WebGraphicsContext3DCommandBufferImpl::GetGPUProcessID() {
return host_ ? host_->gpu_host_id() : 0;
}
| 170,930 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ping_unhash(struct sock *sk)
{
struct inet_sock *isk = inet_sk(sk);
pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
if (sk_hashed(sk)) {
write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
sk_nulls_node_init(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
write_unlock_bh(&ping_table.lock);
}
}
Commit Message: ping: implement proper locking
We got a report of yet another bug in ping
http://www.openwall.com/lists/oss-security/2017/03/24/6
->disconnect() is not called with socket lock held.
Fix this by acquiring ping rwlock earlier.
Thanks to Daniel, Alexander and Andrey for letting us know this problem.
Fixes: c319b4d76b9e ("net: ipv4: add IPPROTO_ICMP socket kind")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Daniel Jiang <[email protected]>
Reported-by: Solar Designer <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: | void ping_unhash(struct sock *sk)
{
struct inet_sock *isk = inet_sk(sk);
pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
write_lock_bh(&ping_table.lock);
if (sk_hashed(sk)) {
hlist_nulls_del(&sk->sk_nulls_node);
sk_nulls_node_init(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}
write_unlock_bh(&ping_table.lock);
}
| 168,435 |
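The locking change above is a textbook check-then-act fix: the hashed test and the unhash must form one critical section, otherwise two paths can both pass the test and unhash (and sock_put) the same socket twice. Below is a minimal sketch of the racy shape versus the fixed shape, with made-up names and a plain mutex standing in for the kernel's rwlock.
#include <mutex>
struct entry { bool hashed = true; int refcount = 2; };
std::mutex table_lock;
// Racy shape (what the vulnerable function did): test outside the lock, act inside it.
void unhash_racy(entry& e) {
    if (e.hashed) {                 // two threads can both see hashed == true here
        std::lock_guard<std::mutex> g(table_lock);
        e.hashed = false;
        --e.refcount;               // may then run twice for the same entry
    }
}
// Fixed shape: take the lock first, then test-and-act as one critical section.
void unhash_fixed(entry& e) {
    std::lock_guard<std::mutex> g(table_lock);
    if (e.hashed) {
        e.hashed = false;
        --e.refcount;
    }
}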
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void WebGL2RenderingContextBase::texSubImage2D(GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLsizei width,
GLsizei height,
GLenum format,
GLenum type,
GLintptr offset) {
if (isContextLost())
return;
if (!ValidateTexture2DBinding("texSubImage2D", target))
return;
if (!bound_pixel_unpack_buffer_) {
SynthesizeGLError(GL_INVALID_OPERATION, "texSubImage2D",
"no bound PIXEL_UNPACK_BUFFER");
return;
}
if (!ValidateTexFunc("texSubImage2D", kTexSubImage, kSourceUnpackBuffer,
target, level, 0, width, height, 1, 0, format, type,
xoffset, yoffset, 0))
return;
if (!ValidateValueFitNonNegInt32("texSubImage2D", "offset", offset))
return;
ContextGL()->TexSubImage2D(target, level, xoffset, yoffset, width, height,
format, type,
reinterpret_cast<const void*>(offset));
}
Commit Message: Implement 2D texture uploading from client array with FLIP_Y or PREMULTIPLY_ALPHA.
BUG=774174
TEST=https://github.com/KhronosGroup/WebGL/pull/2555
[email protected]
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I4f4e7636314502451104730501a5048a5d7b9f3f
Reviewed-on: https://chromium-review.googlesource.com/808665
Commit-Queue: Zhenyao Mo <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#522003}
CWE ID: CWE-125 | void WebGL2RenderingContextBase::texSubImage2D(GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLsizei width,
GLsizei height,
GLenum format,
GLenum type,
GLintptr offset) {
if (isContextLost())
return;
if (!ValidateTexture2DBinding("texSubImage2D", target))
return;
if (!bound_pixel_unpack_buffer_) {
SynthesizeGLError(GL_INVALID_OPERATION, "texSubImage2D",
"no bound PIXEL_UNPACK_BUFFER");
return;
}
if (unpack_flip_y_ || unpack_premultiply_alpha_) {
SynthesizeGLError(
GL_INVALID_OPERATION, "texSubImage2D",
"FLIP_Y or PREMULTIPLY_ALPHA isn't allowed while uploading from PBO");
return;
}
if (!ValidateTexFunc("texSubImage2D", kTexSubImage, kSourceUnpackBuffer,
target, level, 0, width, height, 1, 0, format, type,
xoffset, yoffset, 0))
return;
if (!ValidateValueFitNonNegInt32("texSubImage2D", "offset", offset))
return;
ContextGL()->TexSubImage2D(target, level, xoffset, yoffset, width, height,
format, type,
reinterpret_cast<const void*>(offset));
}
| 172,678 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int nl80211_start_sched_scan(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_sched_scan_request *request;
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct nlattr *attr;
struct wiphy *wiphy;
int err, tmp, n_ssids = 0, n_channels, i;
u32 interval;
enum ieee80211_band band;
size_t ie_len;
if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
!rdev->ops->sched_scan_start)
return -EOPNOTSUPP;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
if (rdev->sched_scan_req)
return -EINPROGRESS;
if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
return -EINVAL;
interval = nla_get_u32(info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
if (interval == 0)
return -EINVAL;
wiphy = &rdev->wiphy;
if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
n_channels = validate_scan_freqs(
info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
if (!n_channels)
return -EINVAL;
} else {
n_channels = 0;
for (band = 0; band < IEEE80211_NUM_BANDS; band++)
if (wiphy->bands[band])
n_channels += wiphy->bands[band]->n_channels;
}
if (info->attrs[NL80211_ATTR_SCAN_SSIDS])
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
tmp)
n_ssids++;
if (n_ssids > wiphy->max_scan_ssids)
return -EINVAL;
if (info->attrs[NL80211_ATTR_IE])
ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
else
ie_len = 0;
if (ie_len > wiphy->max_scan_ie_len)
return -EINVAL;
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
if (!request)
return -ENOMEM;
if (n_ssids)
request->ssids = (void *)&request->channels[n_channels];
request->n_ssids = n_ssids;
if (ie_len) {
if (request->ssids)
request->ie = (void *)(request->ssids + n_ssids);
else
request->ie = (void *)(request->channels + n_channels);
}
i = 0;
if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
/* user specified, bail out if channel not found */
nla_for_each_nested(attr,
info->attrs[NL80211_ATTR_SCAN_FREQUENCIES],
tmp) {
struct ieee80211_channel *chan;
chan = ieee80211_get_channel(wiphy, nla_get_u32(attr));
if (!chan) {
err = -EINVAL;
goto out_free;
}
/* ignore disabled channels */
if (chan->flags & IEEE80211_CHAN_DISABLED)
continue;
request->channels[i] = chan;
i++;
}
} else {
/* all channels */
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
int j;
if (!wiphy->bands[band])
continue;
for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
struct ieee80211_channel *chan;
chan = &wiphy->bands[band]->channels[j];
if (chan->flags & IEEE80211_CHAN_DISABLED)
continue;
request->channels[i] = chan;
i++;
}
}
}
if (!i) {
err = -EINVAL;
goto out_free;
}
request->n_channels = i;
i = 0;
if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
tmp) {
if (request->ssids[i].ssid_len >
IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
goto out_free;
}
memcpy(request->ssids[i].ssid, nla_data(attr),
nla_len(attr));
request->ssids[i].ssid_len = nla_len(attr);
i++;
}
}
if (info->attrs[NL80211_ATTR_IE]) {
request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
memcpy((void *)request->ie,
nla_data(info->attrs[NL80211_ATTR_IE]),
request->ie_len);
}
request->dev = dev;
request->wiphy = &rdev->wiphy;
request->interval = interval;
err = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request);
if (!err) {
rdev->sched_scan_req = request;
nl80211_send_sched_scan(rdev, dev,
NL80211_CMD_START_SCHED_SCAN);
goto out;
}
out_free:
kfree(request);
out:
return err;
}
Commit Message: nl80211: fix check for valid SSID size in scan operations
In both trigger_scan and sched_scan operations, we were checking for
the SSID length before assigning the value correctly. Since the
memory was just kzalloc'ed, the check was always failing and SSID with
over 32 characters were allowed to go through.
This was causing a buffer overflow when copying the actual SSID to the
proper place.
This bug has been there since 2.6.29-rc4.
Cc: [email protected]
Signed-off-by: Luciano Coelho <[email protected]>
Signed-off-by: John W. Linville <[email protected]>
CWE ID: CWE-119 | static int nl80211_start_sched_scan(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_sched_scan_request *request;
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct nlattr *attr;
struct wiphy *wiphy;
int err, tmp, n_ssids = 0, n_channels, i;
u32 interval;
enum ieee80211_band band;
size_t ie_len;
if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
!rdev->ops->sched_scan_start)
return -EOPNOTSUPP;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
if (rdev->sched_scan_req)
return -EINPROGRESS;
if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
return -EINVAL;
interval = nla_get_u32(info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
if (interval == 0)
return -EINVAL;
wiphy = &rdev->wiphy;
if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
n_channels = validate_scan_freqs(
info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
if (!n_channels)
return -EINVAL;
} else {
n_channels = 0;
for (band = 0; band < IEEE80211_NUM_BANDS; band++)
if (wiphy->bands[band])
n_channels += wiphy->bands[band]->n_channels;
}
if (info->attrs[NL80211_ATTR_SCAN_SSIDS])
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
tmp)
n_ssids++;
if (n_ssids > wiphy->max_scan_ssids)
return -EINVAL;
if (info->attrs[NL80211_ATTR_IE])
ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
else
ie_len = 0;
if (ie_len > wiphy->max_scan_ie_len)
return -EINVAL;
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
if (!request)
return -ENOMEM;
if (n_ssids)
request->ssids = (void *)&request->channels[n_channels];
request->n_ssids = n_ssids;
if (ie_len) {
if (request->ssids)
request->ie = (void *)(request->ssids + n_ssids);
else
request->ie = (void *)(request->channels + n_channels);
}
i = 0;
if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
/* user specified, bail out if channel not found */
nla_for_each_nested(attr,
info->attrs[NL80211_ATTR_SCAN_FREQUENCIES],
tmp) {
struct ieee80211_channel *chan;
chan = ieee80211_get_channel(wiphy, nla_get_u32(attr));
if (!chan) {
err = -EINVAL;
goto out_free;
}
/* ignore disabled channels */
if (chan->flags & IEEE80211_CHAN_DISABLED)
continue;
request->channels[i] = chan;
i++;
}
} else {
/* all channels */
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
int j;
if (!wiphy->bands[band])
continue;
for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
struct ieee80211_channel *chan;
chan = &wiphy->bands[band]->channels[j];
if (chan->flags & IEEE80211_CHAN_DISABLED)
continue;
request->channels[i] = chan;
i++;
}
}
}
if (!i) {
err = -EINVAL;
goto out_free;
}
request->n_channels = i;
i = 0;
if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
tmp) {
request->ssids[i].ssid_len = nla_len(attr);
if (request->ssids[i].ssid_len >
IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
goto out_free;
}
memcpy(request->ssids[i].ssid, nla_data(attr),
nla_len(attr));
i++;
}
}
if (info->attrs[NL80211_ATTR_IE]) {
request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
memcpy((void *)request->ie,
nla_data(info->attrs[NL80211_ATTR_IE]),
request->ie_len);
}
request->dev = dev;
request->wiphy = &rdev->wiphy;
request->interval = interval;
err = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request);
if (!err) {
rdev->sched_scan_req = request;
nl80211_send_sched_scan(rdev, dev,
NL80211_CMD_START_SCHED_SCAN);
goto out;
}
out_free:
kfree(request);
out:
return err;
}
| 165,857 |
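The kernel fix above boils down to validating the attacker-controlled attribute length against the fixed-size destination before the memcpy, rather than checking a field that kzalloc has just zeroed. A small self-contained sketch of the same ordering, using invented types (`Ssid`, `CopySsid`) rather than the kernel's nlattr/cfg80211 structures:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr std::size_t kMaxSsidLen = 32;

struct Ssid {
  std::uint8_t data[kMaxSsidLen];
  std::uint8_t len = 0;
};

bool CopySsid(Ssid& out, const std::uint8_t* attr, std::size_t attr_len) {
  // The original bug checked a freshly zeroed length field instead of the
  // attribute's real length, so oversized input always slipped through.
  if (attr_len > kMaxSsidLen)
    return false;
  std::memcpy(out.data, attr, attr_len);
  out.len = static_cast<std::uint8_t>(attr_len);
  return true;
}

int main() {
  Ssid s;
  std::uint8_t oversized[40] = {};
  std::printf("oversized accepted? %d\n",
              CopySsid(s, oversized, sizeof(oversized)));  // prints 0
  std::uint8_t ok[5] = {'h', 'e', 'l', 'l', 'o'};
  std::printf("valid accepted? %d\n", CopySsid(s, ok, sizeof(ok)));  // prints 1
}
```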
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: VaapiVideoDecodeAccelerator::VaapiH264Accelerator::CreateH264Picture() {
scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface();
if (!va_surface)
return nullptr;
return new VaapiH264Picture(std::move(va_surface));
}
Commit Message: vaapi vda: Delete owned objects on worker thread in Cleanup()
This CL adds a SEQUENCE_CHECKER to Vaapi*Accelerator classes, and
posts the destruction of those objects to the appropriate thread on
Cleanup().
Also makes {H264,VP8,VP9}Picture RefCountedThreadSafe, see miu@
comment in
https://chromium-review.googlesource.com/c/chromium/src/+/794091#message-a64bed985cfaf8a19499a517bb110a7ce581dc0f
TEST=play back VP9/VP8/H264 w/ simplechrome on soraka, Release build
unstripped, let video play for a few seconds then navigate back; no
crashes. Unittests as before:
video_decode_accelerator_unittest --test_video_data=test-25fps.vp9:320:240:250:250:35:150:12
video_decode_accelerator_unittest --test_video_data=test-25fps.vp8:320:240:250:250:35:150:11
video_decode_accelerator_unittest --test_video_data=test-25fps.h264:320:240:250:258:35:150:1
Bug: 789160
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I7d96aaf89c92bf46f00c8b8b36798e057a842ed2
Reviewed-on: https://chromium-review.googlesource.com/794091
Reviewed-by: Pawel Osciak <[email protected]>
Commit-Queue: Miguel Casas <[email protected]>
Cr-Commit-Position: refs/heads/master@{#523372}
CWE ID: CWE-362 | VaapiVideoDecodeAccelerator::VaapiH264Accelerator::CreateH264Picture() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
scoped_refptr<VaapiDecodeSurface> va_surface = vaapi_dec_->CreateSurface();
if (!va_surface)
return nullptr;
return new VaapiH264Picture(std::move(va_surface));
}
| 172,797 |
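The fixed function above relies on Chromium's SEQUENCE_CHECKER / DCHECK_CALLED_ON_VALID_SEQUENCE machinery. As a rough, dependency-free stand-in for that idea (not the real base::SequenceChecker), an object can record its owning thread at construction and assert on every entry point:

```cpp
#include <cassert>
#include <iostream>
#include <thread>

// Sketch only: pins an object to the thread that constructed it, the same
// affinity the sequence checker enforces in the real fix.
class ThreadAffine {
 public:
  ThreadAffine() : owner_(std::this_thread::get_id()) {}

  void DoWork() {
    // Catches the race the commit describes: a member function running on a
    // thread other than the one that owns the object.
    assert(std::this_thread::get_id() == owner_ && "called off the owner thread");
    std::cout << "work done on the owning thread\n";
  }

 private:
  std::thread::id owner_;
};

int main() {
  ThreadAffine a;
  a.DoWork();  // fine: same thread that constructed the object
  // Calling a.DoWork() from another std::thread would trip the assert in a
  // debug build -- the equivalent of the DCHECK the fix adds.
}
```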
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void CSSDefaultStyleSheets::ensureDefaultStyleSheetsForElement(Element* element, bool& changedDefaultStyle)
{
if (simpleDefaultStyleSheet && !elementCanUseSimpleDefaultStyle(element)) {
loadFullDefaultStyle();
changedDefaultStyle = true;
}
if (element->isSVGElement() && !svgStyleSheet) {
svgStyleSheet = parseUASheet(svgUserAgentStyleSheet, sizeof(svgUserAgentStyleSheet));
defaultStyle->addRulesFromSheet(svgStyleSheet, screenEval());
defaultPrintStyle->addRulesFromSheet(svgStyleSheet, printEval());
changedDefaultStyle = true;
}
if (!mediaControlsStyleSheet && (isHTMLVideoElement(element) || element->hasTagName(audioTag))) {
String mediaRules = String(mediaControlsUserAgentStyleSheet, sizeof(mediaControlsUserAgentStyleSheet)) + RenderTheme::theme().extraMediaControlsStyleSheet();
mediaControlsStyleSheet = parseUASheet(mediaRules);
defaultStyle->addRulesFromSheet(mediaControlsStyleSheet, screenEval());
defaultPrintStyle->addRulesFromSheet(mediaControlsStyleSheet, printEval());
changedDefaultStyle = true;
}
if (!fullscreenStyleSheet && FullscreenElementStack::isFullScreen(&element->document())) {
String fullscreenRules = String(fullscreenUserAgentStyleSheet, sizeof(fullscreenUserAgentStyleSheet)) + RenderTheme::theme().extraFullScreenStyleSheet();
fullscreenStyleSheet = parseUASheet(fullscreenRules);
defaultStyle->addRulesFromSheet(fullscreenStyleSheet, screenEval());
defaultQuirksStyle->addRulesFromSheet(fullscreenStyleSheet, screenEval());
changedDefaultStyle = true;
}
ASSERT(defaultStyle->features().idsInRules.isEmpty());
ASSERT(defaultStyle->features().siblingRules.isEmpty());
}
Commit Message: Remove the Simple Default Stylesheet, it's just a foot-gun.
We've been bitten by the Simple Default Stylesheet being out
of sync with the real html.css twice this week.
The Simple Default Stylesheet was invented years ago for Mac:
http://trac.webkit.org/changeset/36135
It nicely handles the case where you just want to create
a single WebView and parse some simple HTML either without
styling said HTML, or only to display a small string, etc.
Note that this optimization/complexity *only* helps for the
very first document, since the default stylesheets are
all static (process-global) variables. Since any real page
on the internet uses a tag not covered by the simple default
stylesheet, not real load benefits from this optimization.
Only uses of WebView which were just rendering small bits
of text might have benefited from this. about:blank would
also have used this sheet.
This was a common application for some uses of WebView back
in those days. These days, even with WebView on Android,
there are likely much larger overheads than parsing the
html.css stylesheet, so making it required seems like the
right tradeoff of code-simplicity for this case.
BUG=319556
Review URL: https://codereview.chromium.org/73723005
git-svn-id: svn://svn.chromium.org/blink/trunk@162153 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-399 | void CSSDefaultStyleSheets::ensureDefaultStyleSheetsForElement(Element* element, bool& changedDefaultStyle)
{
if (element->isSVGElement() && !svgStyleSheet) {
svgStyleSheet = parseUASheet(svgUserAgentStyleSheet, sizeof(svgUserAgentStyleSheet));
defaultStyle->addRulesFromSheet(svgStyleSheet, screenEval());
defaultPrintStyle->addRulesFromSheet(svgStyleSheet, printEval());
changedDefaultStyle = true;
}
if (!mediaControlsStyleSheet && (isHTMLVideoElement(element) || element->hasTagName(audioTag))) {
String mediaRules = String(mediaControlsUserAgentStyleSheet, sizeof(mediaControlsUserAgentStyleSheet)) + RenderTheme::theme().extraMediaControlsStyleSheet();
mediaControlsStyleSheet = parseUASheet(mediaRules);
defaultStyle->addRulesFromSheet(mediaControlsStyleSheet, screenEval());
defaultPrintStyle->addRulesFromSheet(mediaControlsStyleSheet, printEval());
changedDefaultStyle = true;
}
if (!fullscreenStyleSheet && FullscreenElementStack::isFullScreen(&element->document())) {
String fullscreenRules = String(fullscreenUserAgentStyleSheet, sizeof(fullscreenUserAgentStyleSheet)) + RenderTheme::theme().extraFullScreenStyleSheet();
fullscreenStyleSheet = parseUASheet(fullscreenRules);
defaultStyle->addRulesFromSheet(fullscreenStyleSheet, screenEval());
defaultQuirksStyle->addRulesFromSheet(fullscreenStyleSheet, screenEval());
changedDefaultStyle = true;
}
ASSERT(defaultStyle->features().idsInRules.isEmpty());
ASSERT(defaultStyle->features().siblingRules.isEmpty());
}
| 171,579 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: char *path_name(const struct name_path *path, const char *name)
{
const struct name_path *p;
char *n, *m;
int nlen = strlen(name);
int len = nlen + 1;
for (p = path; p; p = p->up) {
if (p->elem_len)
len += p->elem_len + 1;
}
n = xmalloc(len);
m = n + len - (nlen + 1);
strcpy(m, name);
for (p = path; p; p = p->up) {
if (p->elem_len) {
m -= p->elem_len + 1;
memcpy(m, p->elem, p->elem_len);
m[p->elem_len] = '/';
}
}
return n;
}
Commit Message: prefer memcpy to strcpy
When we already know the length of a string (e.g., because
we just malloc'd to fit it), it's nicer to use memcpy than
strcpy, as it makes it more obvious that we are not going to
overflow the buffer (because the size we pass matches the
size in the allocation).
This also eliminates calls to strcpy, which make auditing
the code base harder.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
CWE ID: CWE-119 | char *path_name(const struct name_path *path, const char *name)
{
const struct name_path *p;
char *n, *m;
int nlen = strlen(name);
int len = nlen + 1;
for (p = path; p; p = p->up) {
if (p->elem_len)
len += p->elem_len + 1;
}
n = xmalloc(len);
m = n + len - (nlen + 1);
memcpy(m, name, nlen + 1);
for (p = path; p; p = p->up) {
if (p->elem_len) {
m -= p->elem_len + 1;
memcpy(m, p->elem, p->elem_len);
m[p->elem_len] = '/';
}
}
return n;
}
| 167,429 |
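The commit above swaps strcpy for memcpy wherever the length is already known from the allocation, so the copy bound is visibly the same value the buffer was sized with. A compact illustration of the idiom outside git's code base; `join_path` is an invented helper, not git's path_name:

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Sketch only: the copy lengths below mirror the malloc size exactly.
char* join_path(const char* dir, const char* name) {
  std::size_t dir_len = std::strlen(dir);
  std::size_t name_len = std::strlen(name);
  char* out = static_cast<char*>(std::malloc(dir_len + 1 + name_len + 1));
  if (!out)
    return nullptr;
  std::memcpy(out, dir, dir_len);                      // bound matches malloc
  out[dir_len] = '/';
  std::memcpy(out + dir_len + 1, name, name_len + 1);  // copies the '\0' too
  return out;
}

int main() {
  char* p = join_path("refs/heads", "master");
  if (p) {
    std::puts(p);  // prints refs/heads/master
    std::free(p);
  }
}
```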
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ResourcePrefetchPredictor::LearnOrigins(
const std::string& host,
const GURL& main_frame_origin,
const std::map<GURL, OriginRequestSummary>& summaries) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
if (host.size() > ResourcePrefetchPredictorTables::kMaxStringLength)
return;
OriginData data;
bool exists = origin_data_->TryGetData(host, &data);
if (!exists) {
data.set_host(host);
data.set_last_visit_time(base::Time::Now().ToInternalValue());
size_t origins_size = summaries.size();
auto ordered_origins =
std::vector<const OriginRequestSummary*>(origins_size);
for (const auto& kv : summaries) {
size_t index = kv.second.first_occurrence;
DCHECK_LT(index, origins_size);
ordered_origins[index] = &kv.second;
}
for (const OriginRequestSummary* summary : ordered_origins) {
auto* origin_to_add = data.add_origins();
InitializeOriginStatFromOriginRequestSummary(origin_to_add, *summary);
}
} else {
data.set_last_visit_time(base::Time::Now().ToInternalValue());
std::map<GURL, int> old_index;
int old_size = static_cast<int>(data.origins_size());
for (int i = 0; i < old_size; ++i) {
bool is_new =
old_index.insert({GURL(data.origins(i).origin()), i}).second;
DCHECK(is_new);
}
for (int i = 0; i < old_size; ++i) {
auto* old_origin = data.mutable_origins(i);
GURL origin(old_origin->origin());
auto it = summaries.find(origin);
if (it == summaries.end()) {
old_origin->set_number_of_misses(old_origin->number_of_misses() + 1);
old_origin->set_consecutive_misses(old_origin->consecutive_misses() +
1);
} else {
const auto& new_origin = it->second;
old_origin->set_always_access_network(new_origin.always_access_network);
old_origin->set_accessed_network(new_origin.accessed_network);
int position = new_origin.first_occurrence + 1;
int total =
old_origin->number_of_hits() + old_origin->number_of_misses();
old_origin->set_average_position(
((old_origin->average_position() * total) + position) /
(total + 1));
old_origin->set_number_of_hits(old_origin->number_of_hits() + 1);
old_origin->set_consecutive_misses(0);
}
}
for (const auto& kv : summaries) {
if (old_index.find(kv.first) != old_index.end())
continue;
auto* origin_to_add = data.add_origins();
InitializeOriginStatFromOriginRequestSummary(origin_to_add, kv.second);
}
}
ResourcePrefetchPredictorTables::TrimOrigins(&data,
config_.max_consecutive_misses);
ResourcePrefetchPredictorTables::SortOrigins(&data, main_frame_origin.spec());
if (data.origins_size() > static_cast<int>(config_.max_origins_per_entry)) {
data.mutable_origins()->DeleteSubrange(
config_.max_origins_per_entry,
data.origins_size() - config_.max_origins_per_entry);
}
if (data.origins_size() == 0)
origin_data_->DeleteData({host});
else
origin_data_->UpdateData(host, data);
}
Commit Message: Origins should be represented as url::Origin (not as GURL).
As pointed out in //docs/security/origin-vs-url.md, origins should be
represented as url::Origin (not as GURL). This CL applies this
guideline to predictor-related code and changes the type of the
following fields from GURL to url::Origin:
- OriginRequestSummary::origin
- PreconnectedRequestStats::origin
- PreconnectRequest::origin
The old code did not depend on any non-origin parts of GURL
(like path and/or query). Therefore, this CL has no intended
behavior change.
Bug: 973885
Change-Id: Idd14590b4834cb9d50c74ed747b595fe1a4ba357
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1895167
Commit-Queue: Łukasz Anforowicz <[email protected]>
Reviewed-by: Alex Ilin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#716311}
CWE ID: CWE-125 | void ResourcePrefetchPredictor::LearnOrigins(
const std::string& host,
const GURL& main_frame_origin,
const std::map<url::Origin, OriginRequestSummary>& summaries) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
if (host.size() > ResourcePrefetchPredictorTables::kMaxStringLength)
return;
OriginData data;
bool exists = origin_data_->TryGetData(host, &data);
if (!exists) {
data.set_host(host);
data.set_last_visit_time(base::Time::Now().ToInternalValue());
size_t origins_size = summaries.size();
auto ordered_origins =
std::vector<const OriginRequestSummary*>(origins_size);
for (const auto& kv : summaries) {
size_t index = kv.second.first_occurrence;
DCHECK_LT(index, origins_size);
ordered_origins[index] = &kv.second;
}
for (const OriginRequestSummary* summary : ordered_origins) {
auto* origin_to_add = data.add_origins();
InitializeOriginStatFromOriginRequestSummary(origin_to_add, *summary);
}
} else {
data.set_last_visit_time(base::Time::Now().ToInternalValue());
std::map<url::Origin, int> old_index;
int old_size = static_cast<int>(data.origins_size());
for (int i = 0; i < old_size; ++i) {
bool is_new =
old_index
.insert({url::Origin::Create(GURL(data.origins(i).origin())), i})
.second;
DCHECK(is_new);
}
for (int i = 0; i < old_size; ++i) {
auto* old_origin = data.mutable_origins(i);
url::Origin origin = url::Origin::Create(GURL(old_origin->origin()));
auto it = summaries.find(origin);
if (it == summaries.end()) {
old_origin->set_number_of_misses(old_origin->number_of_misses() + 1);
old_origin->set_consecutive_misses(old_origin->consecutive_misses() +
1);
} else {
const auto& new_origin = it->second;
old_origin->set_always_access_network(new_origin.always_access_network);
old_origin->set_accessed_network(new_origin.accessed_network);
int position = new_origin.first_occurrence + 1;
int total =
old_origin->number_of_hits() + old_origin->number_of_misses();
old_origin->set_average_position(
((old_origin->average_position() * total) + position) /
(total + 1));
old_origin->set_number_of_hits(old_origin->number_of_hits() + 1);
old_origin->set_consecutive_misses(0);
}
}
for (const auto& kv : summaries) {
if (old_index.find(kv.first) != old_index.end())
continue;
auto* origin_to_add = data.add_origins();
InitializeOriginStatFromOriginRequestSummary(origin_to_add, kv.second);
}
}
ResourcePrefetchPredictorTables::TrimOrigins(&data,
config_.max_consecutive_misses);
ResourcePrefetchPredictorTables::SortOrigins(&data, main_frame_origin.spec());
if (data.origins_size() > static_cast<int>(config_.max_origins_per_entry)) {
data.mutable_origins()->DeleteSubrange(
config_.max_origins_per_entry,
data.origins_size() - config_.max_origins_per_entry);
}
if (data.origins_size() == 0)
origin_data_->DeleteData({host});
else
origin_data_->UpdateData(host, data);
}
| 172,380 |
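The change above is about representing origins as url::Origin instead of GURL. A toy stand-in (not Chromium's url/origin.h type) makes the point concrete: an origin key carries only scheme, host and port, so URLs differing in path or query collapse to the same entry.

```cpp
#include <iostream>
#include <string>
#include <tuple>

// Sketch only: a minimal origin key; the real type is url::Origin.
struct Origin {
  std::string scheme;
  std::string host;
  int port;

  bool operator==(const Origin& other) const {
    return std::tie(scheme, host, port) ==
           std::tie(other.scheme, other.host, other.port);
  }
};

int main() {
  Origin a{"https", "example.com", 443};  // from https://example.com/index.html
  Origin b{"https", "example.com", 443};  // from https://example.com/app?x=1
  std::cout << "same origin key: " << (a == b) << "\n";  // prints 1
}
```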
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool ChromeRenderMessageFilter::OnMessageReceived(const IPC::Message& message,
bool* message_was_ok) {
bool handled = true;
IPC_BEGIN_MESSAGE_MAP_EX(ChromeRenderMessageFilter, message, *message_was_ok)
#if !defined(DISABLE_NACL)
IPC_MESSAGE_HANDLER_DELAY_REPLY(ChromeViewHostMsg_LaunchNaCl, OnLaunchNaCl)
#endif
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_DnsPrefetch, OnDnsPrefetch)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_RendererHistograms,
OnRendererHistograms)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_ResourceTypeStats,
OnResourceTypeStats)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_UpdatedCacheStats,
OnUpdatedCacheStats)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_FPS, OnFPS)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_V8HeapStats, OnV8HeapStats)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_OpenChannelToExtension,
OnOpenChannelToExtension)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_OpenChannelToTab, OnOpenChannelToTab)
IPC_MESSAGE_HANDLER_DELAY_REPLY(ExtensionHostMsg_GetMessageBundle,
OnGetExtensionMessageBundle)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_AddListener, OnExtensionAddListener)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_RemoveListener,
OnExtensionRemoveListener)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_AddLazyListener,
OnExtensionAddLazyListener)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_RemoveLazyListener,
OnExtensionRemoveLazyListener)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_CloseChannel, OnExtensionCloseChannel)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_RequestForIOThread,
OnExtensionRequestForIOThread)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_ShouldUnloadAck,
OnExtensionShouldUnloadAck)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_GenerateUniqueID,
OnExtensionGenerateUniqueID)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_UnloadAck, OnExtensionUnloadAck)
#if defined(USE_TCMALLOC)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_WriteTcmallocHeapProfile_ACK,
OnWriteTcmallocHeapProfile)
#endif
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowDatabase, OnAllowDatabase)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowDOMStorage, OnAllowDOMStorage)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowFileSystem, OnAllowFileSystem)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowIndexedDB, OnAllowIndexedDB)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_CanTriggerClipboardRead,
OnCanTriggerClipboardRead)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_CanTriggerClipboardWrite,
OnCanTriggerClipboardWrite)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
#if defined(ENABLE_AUTOMATION)
if ((message.type() == ChromeViewHostMsg_GetCookies::ID ||
message.type() == ChromeViewHostMsg_SetCookie::ID) &&
AutomationResourceMessageFilter::ShouldFilterCookieMessages(
render_process_id_, message.routing_id())) {
IPC_BEGIN_MESSAGE_MAP_EX(ChromeRenderMessageFilter, message,
*message_was_ok)
IPC_MESSAGE_HANDLER_DELAY_REPLY(ChromeViewHostMsg_GetCookies,
OnGetCookies)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_SetCookie, OnSetCookie)
IPC_END_MESSAGE_MAP()
handled = true;
}
#endif
return handled;
}
Commit Message: Make chrome.appWindow.create() provide access to the child window at a predictable time.
When you first create a window with chrome.appWindow.create(), it won't have
loaded any resources. So, at create time, you are guaranteed that:
child_window.location.href == 'about:blank'
child_window.document.documentElement.outerHTML ==
'<html><head></head><body></body></html>'
This is in line with the behaviour of window.open().
BUG=131735
TEST=browser_tests:PlatformAppBrowserTest.WindowsApi
Committed: http://src.chromium.org/viewvc/chrome?view=rev&revision=144072
Review URL: https://chromiumcodereview.appspot.com/10644006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@144356 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | bool ChromeRenderMessageFilter::OnMessageReceived(const IPC::Message& message,
bool* message_was_ok) {
bool handled = true;
IPC_BEGIN_MESSAGE_MAP_EX(ChromeRenderMessageFilter, message, *message_was_ok)
#if !defined(DISABLE_NACL)
IPC_MESSAGE_HANDLER_DELAY_REPLY(ChromeViewHostMsg_LaunchNaCl, OnLaunchNaCl)
#endif
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_DnsPrefetch, OnDnsPrefetch)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_RendererHistograms,
OnRendererHistograms)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_ResourceTypeStats,
OnResourceTypeStats)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_UpdatedCacheStats,
OnUpdatedCacheStats)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_FPS, OnFPS)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_V8HeapStats, OnV8HeapStats)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_OpenChannelToExtension,
OnOpenChannelToExtension)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_OpenChannelToTab, OnOpenChannelToTab)
IPC_MESSAGE_HANDLER_DELAY_REPLY(ExtensionHostMsg_GetMessageBundle,
OnGetExtensionMessageBundle)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_AddListener, OnExtensionAddListener)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_RemoveListener,
OnExtensionRemoveListener)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_AddLazyListener,
OnExtensionAddLazyListener)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_RemoveLazyListener,
OnExtensionRemoveLazyListener)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_CloseChannel, OnExtensionCloseChannel)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_RequestForIOThread,
OnExtensionRequestForIOThread)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_ShouldUnloadAck,
OnExtensionShouldUnloadAck)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_GenerateUniqueID,
OnExtensionGenerateUniqueID)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_UnloadAck, OnExtensionUnloadAck)
IPC_MESSAGE_HANDLER(ExtensionHostMsg_ResumeRequests,
OnExtensionResumeRequests);
#if defined(USE_TCMALLOC)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_WriteTcmallocHeapProfile_ACK,
OnWriteTcmallocHeapProfile)
#endif
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowDatabase, OnAllowDatabase)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowDOMStorage, OnAllowDOMStorage)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowFileSystem, OnAllowFileSystem)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_AllowIndexedDB, OnAllowIndexedDB)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_CanTriggerClipboardRead,
OnCanTriggerClipboardRead)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_CanTriggerClipboardWrite,
OnCanTriggerClipboardWrite)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
#if defined(ENABLE_AUTOMATION)
if ((message.type() == ChromeViewHostMsg_GetCookies::ID ||
message.type() == ChromeViewHostMsg_SetCookie::ID) &&
AutomationResourceMessageFilter::ShouldFilterCookieMessages(
render_process_id_, message.routing_id())) {
IPC_BEGIN_MESSAGE_MAP_EX(ChromeRenderMessageFilter, message,
*message_was_ok)
IPC_MESSAGE_HANDLER_DELAY_REPLY(ChromeViewHostMsg_GetCookies,
OnGetCookies)
IPC_MESSAGE_HANDLER(ChromeViewHostMsg_SetCookie, OnSetCookie)
IPC_END_MESSAGE_MAP()
handled = true;
}
#endif
return handled;
}
| 170,812 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void TabletModeWindowState::LeaveTabletMode(wm::WindowState* window_state) {
EnterAnimationType animation_type =
window_state->IsSnapped() || IsTopWindow(window_state->window())
? DEFAULT
: IMMEDIATE;
if (old_state_->GetType() == window_state->GetStateType() &&
!window_state->IsNormalStateType()) {
animation_type = IMMEDIATE;
}
old_state_->set_enter_animation_type(animation_type);
std::unique_ptr<wm::WindowState::State> our_reference =
window_state->SetStateObject(std::move(old_state_));
}
Commit Message: Fix the crash after clamshell -> tablet transition in overview mode.
This CL just reverted some changes that were made in
https://chromium-review.googlesource.com/c/chromium/src/+/1658955. In
that CL, we changed the clamshell <-> tablet transition when clamshell
split view mode is enabled, however, we should keep the old behavior
unchanged if the feature is not enabled, i.e., overview should be ended
if it's active before the transition. Otherwise, it will cause a nullptr
dereference crash since |split_view_drag_indicators_| is not created in
clamshell overview and will be used in tablet overview.
Bug: 982507
Change-Id: I238fe9472648a446cff4ab992150658c228714dd
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1705474
Commit-Queue: Xiaoqian Dai <[email protected]>
Reviewed-by: Mitsuru Oshima (Slow - on/off site) <[email protected]>
Cr-Commit-Position: refs/heads/master@{#679306}
CWE ID: CWE-362 | void TabletModeWindowState::LeaveTabletMode(wm::WindowState* window_state) {
void TabletModeWindowState::LeaveTabletMode(wm::WindowState* window_state,
bool was_in_overview) {
// Only do bounds change animation if the window was showing in overview,
// or the top window or a window showing in splitview before leaving tablet
// mode, and the window has changed its state. Otherwise, restore its bounds
// immediately.
EnterAnimationType animation_type =
was_in_overview || window_state->IsSnapped() ||
IsTopWindow(window_state->window())
? DEFAULT
: IMMEDIATE;
if (old_state_->GetType() == window_state->GetStateType() &&
!window_state->IsNormalStateType()) {
animation_type = IMMEDIATE;
}
old_state_->set_enter_animation_type(animation_type);
std::unique_ptr<wm::WindowState::State> our_reference =
window_state->SetStateObject(std::move(old_state_));
}
| 172,403 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: PrintingContext::Result PrintingContextCairo::UpdatePrinterSettings(
const DictionaryValue& job_settings, const PageRanges& ranges) {
#if defined(OS_CHROMEOS)
bool landscape = false;
if (!job_settings.GetBoolean(kSettingLandscape, &landscape))
return OnError();
settings_.SetOrientation(landscape);
settings_.ranges = ranges;
return OK;
#else
DCHECK(!in_print_job_);
if (!print_dialog_->UpdateSettings(job_settings, ranges))
return OnError();
return OK;
#endif
}
Commit Message: Fix print preview workflow to reflect settings of selected printer.
BUG=95110
TEST=none
Review URL: http://codereview.chromium.org/7831041
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@102242 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | PrintingContext::Result PrintingContextCairo::UpdatePrinterSettings(
const DictionaryValue& job_settings, const PageRanges& ranges) {
#if defined(OS_CHROMEOS)
bool landscape = false;
if (!job_settings.GetBoolean(kSettingLandscape, &landscape))
return OnError();
settings_.SetOrientation(landscape);
settings_.ranges = ranges;
return OK;
#else
DCHECK(!in_print_job_);
if (!print_dialog_) {
print_dialog_ = create_dialog_func_(this);
print_dialog_->AddRefToDialog();
}
if (!print_dialog_->UpdateSettings(job_settings, ranges))
return OnError();
return OK;
#endif
}
| 170,267 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void SplitString(const std::wstring& str,
wchar_t c,
std::vector<std::wstring>* r) {
SplitStringT(str, c, true, r);
}
Commit Message: wstring: remove wstring version of SplitString
Retry of r84336.
BUG=23581
Review URL: http://codereview.chromium.org/6930047
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@84355 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void SplitString(const std::wstring& str,
| 170,555 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int SeekHead::GetVoidElementCount() const
{
return m_void_element_count;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | int SeekHead::GetVoidElementCount() const
| 174,381 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: t42_parse_charstrings( T42_Face face,
T42_Loader loader )
{
T42_Parser parser = &loader->parser;
PS_Table code_table = &loader->charstrings;
PS_Table name_table = &loader->glyph_names;
PS_Table swap_table = &loader->swap_table;
FT_Memory memory = parser->root.memory;
FT_Error error;
PSAux_Service psaux = (PSAux_Service)face->psaux;
FT_Byte* cur;
FT_Byte* limit = parser->root.limit;
FT_UInt n;
FT_UInt notdef_index = 0;
FT_Byte notdef_found = 0;
T1_Skip_Spaces( parser );
if ( parser->root.cursor >= limit )
{
FT_ERROR(( "t42_parse_charstrings: out of bounds\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
if ( ft_isdigit( *parser->root.cursor ) )
{
loader->num_glyphs = (FT_UInt)T1_ToInt( parser );
if ( parser->root.error )
return;
}
else if ( *parser->root.cursor == '<' )
{
/* We have `<< ... >>'. Count the number of `/' in the dictionary */
/* to get its size. */
FT_UInt count = 0;
T1_Skip_PS_Token( parser );
if ( parser->root.error )
return;
T1_Skip_Spaces( parser );
cur = parser->root.cursor;
while ( parser->root.cursor < limit )
{
if ( *parser->root.cursor == '/' )
count++;
else if ( *parser->root.cursor == '>' )
{
loader->num_glyphs = count;
parser->root.cursor = cur; /* rewind */
break;
}
T1_Skip_PS_Token( parser );
if ( parser->root.error )
return;
T1_Skip_Spaces( parser );
}
}
else
{
FT_ERROR(( "t42_parse_charstrings: invalid token\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
if ( parser->root.cursor >= limit )
{
FT_ERROR(( "t42_parse_charstrings: out of bounds\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
/* initialize tables */
error = psaux->ps_table_funcs->init( code_table,
loader->num_glyphs,
memory );
if ( error )
goto Fail;
error = psaux->ps_table_funcs->init( name_table,
loader->num_glyphs,
memory );
if ( error )
goto Fail;
/* Initialize table for swapping index notdef_index and */
/* index 0 names and codes (if necessary). */
error = psaux->ps_table_funcs->init( swap_table, 4, memory );
if ( error )
goto Fail;
n = 0;
for (;;)
{
/* The format is simple: */
/* `/glyphname' + index [+ def] */
T1_Skip_Spaces( parser );
cur = parser->root.cursor;
if ( cur >= limit )
break;
/* We stop when we find an `end' keyword or '>' */
if ( *cur == 'e' &&
cur + 3 < limit &&
cur[1] == 'n' &&
cur[2] == 'd' &&
t42_is_space( cur[3] ) )
break;
if ( *cur == '>' )
break;
T1_Skip_PS_Token( parser );
if ( parser->root.error )
return;
{
FT_ERROR(( "t42_parse_charstrings: out of bounds\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
cur++; /* skip `/' */
len = parser->root.cursor - cur;
error = T1_Add_Table( name_table, n, cur, len + 1 );
if ( error )
goto Fail;
/* add a trailing zero to the name table */
name_table->elements[n][len] = '\0';
/* record index of /.notdef */
if ( *cur == '.' &&
ft_strcmp( ".notdef",
(const char*)(name_table->elements[n]) ) == 0 )
{
notdef_index = n;
notdef_found = 1;
}
T1_Skip_Spaces( parser );
cur = parser->root.cursor;
(void)T1_ToInt( parser );
if ( parser->root.cursor >= limit )
{
FT_ERROR(( "t42_parse_charstrings: out of bounds\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
len = parser->root.cursor - cur;
error = T1_Add_Table( code_table, n, cur, len + 1 );
if ( error )
goto Fail;
code_table->elements[n][len] = '\0';
n++;
if ( n >= loader->num_glyphs )
break;
}
}
Commit Message:
CWE ID: CWE-119 | t42_parse_charstrings( T42_Face face,
T42_Loader loader )
{
T42_Parser parser = &loader->parser;
PS_Table code_table = &loader->charstrings;
PS_Table name_table = &loader->glyph_names;
PS_Table swap_table = &loader->swap_table;
FT_Memory memory = parser->root.memory;
FT_Error error;
PSAux_Service psaux = (PSAux_Service)face->psaux;
FT_Byte* cur;
FT_Byte* limit = parser->root.limit;
FT_UInt n;
FT_UInt notdef_index = 0;
FT_Byte notdef_found = 0;
T1_Skip_Spaces( parser );
if ( parser->root.cursor >= limit )
{
FT_ERROR(( "t42_parse_charstrings: out of bounds\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
if ( ft_isdigit( *parser->root.cursor ) )
{
loader->num_glyphs = (FT_UInt)T1_ToInt( parser );
if ( parser->root.error )
return;
}
else if ( *parser->root.cursor == '<' )
{
/* We have `<< ... >>'. Count the number of `/' in the dictionary */
/* to get its size. */
FT_UInt count = 0;
T1_Skip_PS_Token( parser );
if ( parser->root.error )
return;
T1_Skip_Spaces( parser );
cur = parser->root.cursor;
while ( parser->root.cursor < limit )
{
if ( *parser->root.cursor == '/' )
count++;
else if ( *parser->root.cursor == '>' )
{
loader->num_glyphs = count;
parser->root.cursor = cur; /* rewind */
break;
}
T1_Skip_PS_Token( parser );
if ( parser->root.error )
return;
T1_Skip_Spaces( parser );
}
}
else
{
FT_ERROR(( "t42_parse_charstrings: invalid token\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
if ( parser->root.cursor >= limit )
{
FT_ERROR(( "t42_parse_charstrings: out of bounds\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
/* initialize tables */
error = psaux->ps_table_funcs->init( code_table,
loader->num_glyphs,
memory );
if ( error )
goto Fail;
error = psaux->ps_table_funcs->init( name_table,
loader->num_glyphs,
memory );
if ( error )
goto Fail;
/* Initialize table for swapping index notdef_index and */
/* index 0 names and codes (if necessary). */
error = psaux->ps_table_funcs->init( swap_table, 4, memory );
if ( error )
goto Fail;
n = 0;
for (;;)
{
/* The format is simple: */
/* `/glyphname' + index [+ def] */
T1_Skip_Spaces( parser );
cur = parser->root.cursor;
if ( cur >= limit )
break;
/* We stop when we find an `end' keyword or '>' */
if ( *cur == 'e' &&
cur + 3 < limit &&
cur[1] == 'n' &&
cur[2] == 'd' &&
t42_is_space( cur[3] ) )
break;
if ( *cur == '>' )
break;
T1_Skip_PS_Token( parser );
if ( parser->root.cursor >= limit )
{
FT_ERROR(( "t42_parse_charstrings: out of bounds\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
if ( parser->root.error )
return;
{
FT_ERROR(( "t42_parse_charstrings: out of bounds\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
cur++; /* skip `/' */
len = parser->root.cursor - cur;
error = T1_Add_Table( name_table, n, cur, len + 1 );
if ( error )
goto Fail;
/* add a trailing zero to the name table */
name_table->elements[n][len] = '\0';
/* record index of /.notdef */
if ( *cur == '.' &&
ft_strcmp( ".notdef",
(const char*)(name_table->elements[n]) ) == 0 )
{
notdef_index = n;
notdef_found = 1;
}
T1_Skip_Spaces( parser );
cur = parser->root.cursor;
(void)T1_ToInt( parser );
if ( parser->root.cursor >= limit )
{
FT_ERROR(( "t42_parse_charstrings: out of bounds\n" ));
error = FT_THROW( Invalid_File_Format );
goto Fail;
}
len = parser->root.cursor - cur;
error = T1_Add_Table( code_table, n, cur, len + 1 );
if ( error )
goto Fail;
code_table->elements[n][len] = '\0';
n++;
if ( n >= loader->num_glyphs )
break;
}
}
| 164,858 |
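The FreeType fix adds another cursor-versus-limit check immediately after a token is skipped, so the parser stops before dereferencing past the end of the buffer. The same discipline in a self-contained form, with invented parser names:

```cpp
#include <cstdio>

// Sketch only: re-check the cursor against the buffer limit after every step
// that may advance it, before the next read.
struct Parser {
  const unsigned char* cursor;
  const unsigned char* limit;
};

// Skips one space-delimited token and any spaces after it.
void skip_token(Parser& p) {
  while (p.cursor < p.limit && *p.cursor != ' ')
    ++p.cursor;
  while (p.cursor < p.limit && *p.cursor == ' ')
    ++p.cursor;
}

bool parse(Parser& p) {
  while (p.cursor < p.limit) {
    skip_token(p);
    if (p.cursor >= p.limit)  // the added check: bail out here
      return false;           // instead of reading past the buffer
    std::printf("next byte after token: %c\n", *p.cursor);
  }
  return true;
}

int main() {
  const unsigned char buf[] = "alpha beta";
  Parser p{buf, buf + sizeof(buf) - 1};
  parse(p);
}
```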
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int jas_memdump(FILE *out, void *data, size_t len)
{
size_t i;
size_t j;
uchar *dp;
dp = data;
for (i = 0; i < len; i += 16) {
fprintf(out, "%04zx:", i);
for (j = 0; j < 16; ++j) {
if (i + j < len) {
fprintf(out, " %02x", dp[i + j]);
}
}
fprintf(out, "\n");
}
return 0;
}
Commit Message: The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log message were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
CWE ID: CWE-190 | int jas_memdump(FILE *out, void *data, size_t len)
{
size_t i;
size_t j;
jas_uchar *dp;
dp = data;
for (i = 0; i < len; i += 16) {
fprintf(out, "%04zx:", i);
for (j = 0; j < 16; ++j) {
if (i + j < len) {
fprintf(out, " %02x", dp[i + j]);
}
}
fprintf(out, "\n");
}
return 0;
}
| 168,682 |
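The JasPer commit mentions adding safe integer add/multiply helpers and a max_samples limit. A generic sketch of an overflow-checked size multiply of that kind; `safe_mul` is an invented name, not JasPer's API:

```cpp
#include <cstddef>
#include <cstdio>
#include <limits>

// Sketch only: refuse a size computation that would wrap around instead of
// passing a silently truncated value to an allocator.
bool safe_mul(std::size_t a, std::size_t b, std::size_t* out) {
  if (b != 0 && a > std::numeric_limits<std::size_t>::max() / b)
    return false;  // a * b would overflow
  *out = a * b;
  return true;
}

int main() {
  std::size_t n = 0;
  // width * height * bytes-per-sample; fits in a 64-bit size_t.
  if (safe_mul(70000u, 70000u, &n) && safe_mul(n, 4u, &n))
    std::printf("size %zu is representable\n", n);
  // A computation that cannot fit: the guard refuses instead of wrapping.
  std::size_t big = std::numeric_limits<std::size_t>::max() / 2;
  if (!safe_mul(big, 4u, &big))
    std::printf("rejected: size computation would overflow\n");
}
```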
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static Image *ReadYCBCRImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
const unsigned char
*pixels;
Image
*canvas_image,
*image;
MagickBooleanType
status;
MagickOffsetType
scene;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register const Quantum
*p;
register ssize_t
i,
x;
register Quantum
*q;
size_t
length;
ssize_t
count,
y;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(OptionError,"MustSpecifyImageSize");
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
SetImageColorspace(image,YCbCrColorspace,exception);
if (image_info->interlace != PartitionInterlace)
{
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
/*
Create virtual canvas to support cropping (i.e. image.rgb[100x100+10+20]).
*/
canvas_image=CloneImage(image,image->extract_info.width,1,MagickFalse,
exception);
(void) SetImageVirtualPixelMethod(canvas_image,BlackVirtualPixelMethod,
exception);
quantum_info=AcquireQuantumInfo(image_info,canvas_image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
quantum_type=RGBQuantum;
if (LocaleCompare(image_info->magick,"YCbCrA") == 0)
{
quantum_type=RGBAQuantum;
image->alpha_trait=BlendPixelTrait;
}
pixels=(const unsigned char *) NULL;
if (image_info->number_scenes != 0)
while (image->scene < image_info->scene)
{
/*
Skip to next image.
*/
image->scene++;
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
break;
}
}
count=0;
length=0;
scene=0;
do
{
/*
Read pixels to virtual canvas image then push to image.
*/
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(DestroyImageList(image));
}
SetImageColorspace(image,YCbCrColorspace,exception);
switch (image_info->interlace)
{
case NoInterlace:
default:
{
/*
No interlacing: YCbCrYCbCrYCbCrYCbCrYCbCrYCbCr...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=QueueAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
break;
}
case LineInterlace:
{
static QuantumType
quantum_types[4] =
{
RedQuantum,
GreenQuantum,
BlueQuantum,
OpacityQuantum
};
/*
Line interlacing: YYY...CbCbCb...CrCrCr...YYY...CbCbCb...CrCrCr...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
for (i=0; i < (image->alpha_trait != UndefinedPixelTrait ? 4 : 3); i++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
quantum_type=quantum_types[i];
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,
0,canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (quantum_type)
{
case RedQuantum:
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
break;
}
case GreenQuantum:
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
break;
}
case BlueQuantum:
{
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
break;
}
case OpacityQuantum:
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
break;
}
default:
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case PlaneInterlace:
{
/*
Plane interlacing: YYYYYY...CbCbCbCbCbCb...CrCrCrCrCrCr...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,RedQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,5);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,GreenQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,5);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlueQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,5);
if (status == MagickFalse)
break;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,AlphaQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,
canvas_image->extract_info.x,0,canvas_image->columns,1,
exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,5);
if (status == MagickFalse)
break;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,5);
if (status == MagickFalse)
break;
}
break;
}
case PartitionInterlace:
{
/*
Partition interlacing: YYYYYY..., CbCbCbCbCbCb..., CrCrCrCrCrCr...
*/
AppendImageFormat("Y",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,RedQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("Cb",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,GreenQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,GreenQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("Cr",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,BlueQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlueQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,5);
if (status == MagickFalse)
break;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
(void) CloseBlob(image);
AppendImageFormat("A",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,AlphaQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlueQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,
canvas_image->extract_info.x,0,canvas_image->columns,1,
exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,5);
if (status == MagickFalse)
break;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,5);
if (status == MagickFalse)
break;
}
break;
}
}
SetQuantumImageType(image,quantum_type);
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (count == (ssize_t) length)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
scene++;
} while (count == (ssize_t) length);
quantum_info=DestroyQuantumInfo(quantum_info);
canvas_image=DestroyImage(canvas_image);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
Commit Message: fix memory leak in ReadYCBCRImage as SetImageExtent failure
CWE ID: CWE-772 | static Image *ReadYCBCRImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
const unsigned char
*pixels;
Image
*canvas_image,
*image;
MagickBooleanType
status;
MagickOffsetType
scene;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register const Quantum
*p;
register ssize_t
i,
x;
register Quantum
*q;
size_t
length;
ssize_t
count,
y;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
if ((image->columns == 0) || (image->rows == 0))
ThrowReaderException(OptionError,"MustSpecifyImageSize");
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
SetImageColorspace(image,YCbCrColorspace,exception);
if (image_info->interlace != PartitionInterlace)
{
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
}
/*
Create virtual canvas to support cropping (i.e. image.rgb[100x100+10+20]).
*/
canvas_image=CloneImage(image,image->extract_info.width,1,MagickFalse,
exception);
(void) SetImageVirtualPixelMethod(canvas_image,BlackVirtualPixelMethod,
exception);
quantum_info=AcquireQuantumInfo(image_info,canvas_image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
quantum_type=RGBQuantum;
if (LocaleCompare(image_info->magick,"YCbCrA") == 0)
{
quantum_type=RGBAQuantum;
image->alpha_trait=BlendPixelTrait;
}
pixels=(const unsigned char *) NULL;
if (image_info->number_scenes != 0)
while (image->scene < image_info->scene)
{
/*
Skip to next image.
*/
image->scene++;
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
for (y=0; y < (ssize_t) image->rows; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
break;
}
}
count=0;
length=0;
scene=0;
do
{
/*
Read pixels to virtual canvas image then push to image.
*/
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
quantum_info=DestroyQuantumInfo(quantum_info);
canvas_image=DestroyImage(canvas_image);
return(DestroyImageList(image));
}
SetImageColorspace(image,YCbCrColorspace,exception);
switch (image_info->interlace)
{
case NoInterlace:
default:
{
/*
No interlacing: YCbCrYCbCrYCbCrYCbCrYCbCrYCbCr...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,quantum_type);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=QueueAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
break;
}
case LineInterlace:
{
static QuantumType
quantum_types[4] =
{
RedQuantum,
GreenQuantum,
BlueQuantum,
OpacityQuantum
};
/*
Line interlacing: YYY...CbCbCb...CrCrCr...YYY...CbCbCb...CrCrCr...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
for (i=0; i < (image->alpha_trait != UndefinedPixelTrait ? 4 : 3); i++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
quantum_type=quantum_types[i];
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,quantum_type,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,
0,canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (quantum_type)
{
case RedQuantum:
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
break;
}
case GreenQuantum:
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
break;
}
case BlueQuantum:
{
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
break;
}
case OpacityQuantum:
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
break;
}
default:
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case PlaneInterlace:
{
/*
Plane interlacing: YYYYYY...CbCbCbCbCbCb...CrCrCrCrCrCr...
*/
if (scene == 0)
{
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,RedQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,5);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,GreenQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,5);
if (status == MagickFalse)
break;
}
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlueQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,5);
if (status == MagickFalse)
break;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,AlphaQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,
canvas_image->extract_info.x,0,canvas_image->columns,1,
exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,5);
if (status == MagickFalse)
break;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,5);
if (status == MagickFalse)
break;
}
break;
}
case PartitionInterlace:
{
/*
Partition interlacing: YYYYYY..., CbCbCbCbCbCb..., CrCrCrCrCrCr...
*/
AppendImageFormat("Y",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
if (DiscardBlobBytes(image,image->offset) == MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
length=GetQuantumExtent(canvas_image,quantum_info,RedQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,RedQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,GetPixelRed(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,1,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("Cb",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,GreenQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,GreenQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGreen(image,GetPixelGreen(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,2,5);
if (status == MagickFalse)
break;
}
(void) CloseBlob(image);
AppendImageFormat("Cr",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,BlueQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlueQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,canvas_image->extract_info.x,0,
canvas_image->columns,1,exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,GetPixelBlue(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,3,5);
if (status == MagickFalse)
break;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
(void) CloseBlob(image);
AppendImageFormat("A",image->filename);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
canvas_image=DestroyImageList(canvas_image);
image=DestroyImageList(image);
return((Image *) NULL);
}
length=GetQuantumExtent(canvas_image,quantum_info,AlphaQuantum);
for (i=0; i < (ssize_t) scene; i++)
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
for (y=0; y < (ssize_t) image->extract_info.height; y++)
{
if (count != (ssize_t) length)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
q=GetAuthenticPixels(canvas_image,0,0,canvas_image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
length=ImportQuantumPixels(canvas_image,(CacheView *) NULL,
quantum_info,BlueQuantum,pixels,exception);
if (SyncAuthenticPixels(canvas_image,exception) == MagickFalse)
break;
if (((y-image->extract_info.y) >= 0) &&
((y-image->extract_info.y) < (ssize_t) image->rows))
{
p=GetVirtualPixels(canvas_image,
canvas_image->extract_info.x,0,canvas_image->columns,1,
exception);
q=GetAuthenticPixels(image,0,y-image->extract_info.y,
image->columns,1,exception);
if ((p == (const Quantum *) NULL) ||
(q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,GetPixelAlpha(canvas_image,p),q);
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
pixels=(const unsigned char *) ReadBlobStream(image,length,
GetQuantumPixels(quantum_info),&count);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,4,5);
if (status == MagickFalse)
break;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,5,5);
if (status == MagickFalse)
break;
}
break;
}
}
SetQuantumImageType(image,quantum_type);
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (count == (ssize_t) length)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
scene++;
} while (count == (ssize_t) length);
quantum_info=DestroyQuantumInfo(quantum_info);
canvas_image=DestroyImage(canvas_image);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| 167,739 |
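The commit message in the row above describes a leak: when SetImageExtent() fails inside the decode loop, the fixed ReadYCBCRImage() now destroys the quantum info and the temporary canvas image before returning. Below is a minimal standalone C++ sketch of that release-before-early-return pattern; the types and function names are invented for illustration and are not ImageMagick's API.

#include <cstdio>
#include <cstdlib>

// Hypothetical stand-ins for ImageMagick's quantum_info / canvas_image.
struct Resource { const char *name; };

static Resource *acquire(const char *name) {
  Resource *r = static_cast<Resource *>(std::malloc(sizeof(Resource)));
  r->name = name;
  return r;
}

static Resource *release(Resource *r) {
  std::free(r);
  return nullptr;  // mirrors the Destroy*() helpers returning NULL
}

static bool set_extent(bool inject_failure) { return !inject_failure; }

// Returns false on failure, but only after releasing everything it owns;
// the vulnerable path returned early without the two release() calls.
static bool decode(bool inject_failure) {
  Resource *quantum = acquire("quantum_info");
  Resource *canvas = acquire("canvas_image");
  if (!set_extent(inject_failure)) {
    quantum = release(quantum);
    canvas = release(canvas);
    return false;
  }
  // ... per-scene decoding would happen here ...
  quantum = release(quantum);
  canvas = release(canvas);
  return true;
}

int main() {
  std::printf("ok=%d leak-free error path=%d\n", decode(false), !decode(true));
  return 0;
}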
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: LoginLibrary* CrosLibrary::GetLoginLibrary() {
return login_lib_.GetDefaultImpl(use_stub_impl_);
}
Commit Message: chromeos: Replace copy-and-pasted code with macros.
This replaces a bunch of duplicated-per-library cros
function definitions and comments.
BUG=none
TEST=built it
Review URL: http://codereview.chromium.org/6086007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@70070 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-189 | LoginLibrary* CrosLibrary::GetLoginLibrary() {
| 170,625 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: ExtensionFunction::ResponseAction BluetoothSocketSendFunction::Run() {
DCHECK_CURRENTLY_ON(work_thread_id());
auto params = bluetooth_socket::Send::Params::Create(*args_);
EXTENSION_FUNCTION_VALIDATE(params.get());
io_buffer_size_ = params->data.size();
io_buffer_ = new net::WrappedIOBuffer(params->data.data());
BluetoothApiSocket* socket = GetSocket(params->socket_id);
if (!socket)
return RespondNow(Error(kSocketNotFoundError));
socket->Send(io_buffer_,
io_buffer_size_,
base::Bind(&BluetoothSocketSendFunction::OnSuccess, this),
base::Bind(&BluetoothSocketSendFunction::OnError, this));
return did_respond() ? AlreadyResponded() : RespondLater();
}
Commit Message: chrome.bluetoothSocket: Fix regression in send()
In https://crrev.com/c/997098, params_ was changed to a local variable,
but it needs to last longer than that since net::WrappedIOBuffer may use
the data after the local variable goes out of scope.
This CL changed it back to be an instance variable.
Bug: 851799
Change-Id: I392f8acaef4c6473d6ea4fbee7209445aa09112e
Reviewed-on: https://chromium-review.googlesource.com/1103676
Reviewed-by: Toni Barzic <[email protected]>
Commit-Queue: Sonny Sasaka <[email protected]>
Cr-Commit-Position: refs/heads/master@{#568137}
CWE ID: CWE-416 | ExtensionFunction::ResponseAction BluetoothSocketSendFunction::Run() {
DCHECK_CURRENTLY_ON(work_thread_id());
params_ = bluetooth_socket::Send::Params::Create(*args_);
EXTENSION_FUNCTION_VALIDATE(params_.get());
io_buffer_size_ = params_->data.size();
io_buffer_ = new net::WrappedIOBuffer(params_->data.data());
BluetoothApiSocket* socket = GetSocket(params_->socket_id);
if (!socket)
return RespondNow(Error(kSocketNotFoundError));
socket->Send(io_buffer_,
io_buffer_size_,
base::Bind(&BluetoothSocketSendFunction::OnSuccess, this),
base::Bind(&BluetoothSocketSendFunction::OnError, this));
return did_respond() ? AlreadyResponded() : RespondLater();
}
| 173,160 |
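The commit message above pins the use-after-free on lifetime: net::WrappedIOBuffer does not copy its bytes, so pointing it at data owned by a function-local variable leaves a dangling pointer once Run() returns, and moving ownership back to an instance member restores the required lifetime. Below is a minimal hedged C++ sketch of that contract; NonOwningBuffer and SendFunction are invented stand-ins, not the actual Chromium classes.

#include <vector>

// Hypothetical non-owning wrapper in the spirit of a wrapped IO buffer:
// it stores only a pointer, so the caller must keep the bytes alive for
// as long as the buffer (or any asynchronous send using it) exists.
class NonOwningBuffer {
 public:
  explicit NonOwningBuffer(const char *data) : data_(data) {}
  const char *data() const { return data_; }
 private:
  const char *data_;
};

class SendFunction {
 public:
  // Unsafe shape (the regression): a function-local vector dies when the
  // function returns, leaving the buffer dangling.
  //   NonOwningBuffer RunUnsafe() {
  //     std::vector<char> params = {'h', 'i'};
  //     return NonOwningBuffer(params.data());   // dangling after return
  //   }

  // Fixed shape: the owner is an instance member, so it lives as long as
  // the object that started the operation.
  NonOwningBuffer Run() {
    params_ = {'h', 'i'};
    return NonOwningBuffer(params_.data());
  }

 private:
  std::vector<char> params_;
};

int main() {
  SendFunction fn;
  NonOwningBuffer buf = fn.Run();
  return buf.data()[0] == 'h' ? 0 : 1;
}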
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: gss_wrap_iov (minor_status,
context_handle,
conf_req_flag,
qop_req,
conf_state,
iov,
iov_count)
OM_uint32 * minor_status;
gss_ctx_id_t context_handle;
int conf_req_flag;
gss_qop_t qop_req;
int * conf_state;
gss_iov_buffer_desc * iov;
int iov_count;
{
/* EXPORT DELETE START */
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
status = val_wrap_iov_args(minor_status, context_handle,
conf_req_flag, qop_req,
conf_state, iov, iov_count);
if (status != GSS_S_COMPLETE)
return (status);
/*
* select the approprate underlying mechanism routine and
* call it.
*/
ctx = (gss_union_ctx_id_t) context_handle;
mech = gssint_get_mechanism (ctx->mech_type);
if (mech) {
if (mech->gss_wrap_iov) {
status = mech->gss_wrap_iov(
minor_status,
ctx->internal_ctx_id,
conf_req_flag,
qop_req,
conf_state,
iov,
iov_count);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
} else
status = GSS_S_UNAVAILABLE;
return(status);
}
/* EXPORT DELETE END */
return (GSS_S_BAD_MECH);
}
Commit Message: Preserve GSS context on init/accept failure
After gss_init_sec_context() or gss_accept_sec_context() has created a
context, don't delete the mechglue context on failures from subsequent
calls, even if the mechanism deletes the mech-specific context (which
is allowed by RFC 2744 but not preferred). Check for union contexts
with no mechanism context in each GSS function which accepts a
gss_ctx_id_t.
CVE-2017-11462:
RFC 2744 permits a GSS-API implementation to delete an existing
security context on a second or subsequent call to
gss_init_sec_context() or gss_accept_sec_context() if the call results
in an error. This API behavior has been found to be dangerous,
leading to the possibility of memory errors in some callers. For
safety, GSS-API implementations should instead preserve existing
security contexts on error until the caller deletes them.
All versions of MIT krb5 prior to this change may delete acceptor
contexts on error. Versions 1.13.4 through 1.13.7, 1.14.1 through
1.14.5, and 1.15 through 1.15.1 may also delete initiator contexts on
error.
ticket: 8598 (new)
target_version: 1.15-next
target_version: 1.14-next
tags: pullup
CWE ID: CWE-415 | gss_wrap_iov (minor_status,
context_handle,
conf_req_flag,
qop_req,
conf_state,
iov,
iov_count)
OM_uint32 * minor_status;
gss_ctx_id_t context_handle;
int conf_req_flag;
gss_qop_t qop_req;
int * conf_state;
gss_iov_buffer_desc * iov;
int iov_count;
{
/* EXPORT DELETE START */
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
status = val_wrap_iov_args(minor_status, context_handle,
conf_req_flag, qop_req,
conf_state, iov, iov_count);
if (status != GSS_S_COMPLETE)
return (status);
/*
* select the approprate underlying mechanism routine and
* call it.
*/
ctx = (gss_union_ctx_id_t) context_handle;
if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT)
return (GSS_S_NO_CONTEXT);
mech = gssint_get_mechanism (ctx->mech_type);
if (mech) {
if (mech->gss_wrap_iov) {
status = mech->gss_wrap_iov(
minor_status,
ctx->internal_ctx_id,
conf_req_flag,
qop_req,
conf_state,
iov,
iov_count);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
} else
status = GSS_S_UNAVAILABLE;
return(status);
}
/* EXPORT DELETE END */
return (GSS_S_BAD_MECH);
}
| 168,031 |
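The krb5 fix above works by adding a guard to each per-context entry point: when the union context still exists but its mechanism-specific context has already been deleted, the call returns an error instead of dispatching, so the caller keeps responsibility for deleting the union context itself. A compact C++ sketch of that guard follows; UnionContext, MechContext and the status values are invented for illustration, not the mechglue types.

#include <cstdio>

// Invented stand-ins for the mechglue union context and status codes.
struct MechContext { int unused; };
struct UnionContext { MechContext *internal_ctx = nullptr; };

enum Status { COMPLETE = 0, NO_CONTEXT = 1 };

// Per-context operation: refuse to proceed when the mechanism context is
// gone, leaving the union context itself for the caller to delete later.
static Status wrap(UnionContext *ctx) {
  if (ctx == nullptr) return NO_CONTEXT;
  if (ctx->internal_ctx == nullptr) return NO_CONTEXT;  // the added guard
  // ... dispatch to the mechanism's wrap routine here ...
  return COMPLETE;
}

int main() {
  UnionContext ctx;                       // mech context already torn down
  std::printf("%d\n", wrap(&ctx));        // 1 (NO_CONTEXT), no double free
  MechContext mc;
  ctx.internal_ctx = &mc;
  std::printf("%d\n", wrap(&ctx));        // 0 (COMPLETE)
  return 0;
}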
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int ret;
int copylen;
ret = -EOPNOTSUPP;
if (m->msg_flags&MSG_OOB)
goto read_error;
m->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;
copylen = skb->len;
if (len < copylen) {
m->msg_flags |= MSG_TRUNC;
copylen = len;
}
ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
if (ret)
goto out_free;
ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
skb_free_datagram(sk, skb);
caif_check_flow_release(sk);
return ret;
read_error:
return ret;
}
Commit Message: net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-20 | static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int ret;
int copylen;
ret = -EOPNOTSUPP;
if (m->msg_flags&MSG_OOB)
goto read_error;
skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;
copylen = skb->len;
if (len < copylen) {
m->msg_flags |= MSG_TRUNC;
copylen = len;
}
ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
if (ret)
goto out_free;
ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
skb_free_datagram(sk, skb);
caif_check_flow_release(sk);
return ret;
read_error:
return ret;
}
| 166,496 |
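The kernel commit above establishes a convention: the caller now passes msg_namelen as 0, and a recvmsg handler sets it (to at most sizeof(struct sockaddr_storage)) only when it actually writes msg_name, which is why the fixed caif_seqpkt_recvmsg() simply stops touching the field. The sketch below illustrates that convention in ordinary C++ with invented structures; it is not kernel code and MsgHdr is not the real struct msghdr.

#include <cstdio>
#include <cstring>

// Invented, simplified stand-ins for the kernel structures.
struct Addr { char bytes[16]; };
struct MsgHdr {
  Addr *name = nullptr;     // where the peer address would be written
  int name_len = 0;         // caller initialises this to 0
};

// A handler that does not report a peer address leaves name_len at 0.
static int recv_no_address(MsgHdr *m) {
  (void)m;                  // nothing written to m->name, name_len stays 0
  return 0;
}

// A handler that does report one fills name and then sets name_len.
static int recv_with_address(MsgHdr *m) {
  if (m->name != nullptr) {
    std::memset(m->name->bytes, 0xAB, sizeof(m->name->bytes));
    m->name_len = static_cast<int>(sizeof(Addr));
  }
  return 0;
}

int main() {
  Addr a;
  MsgHdr m1; m1.name = &a;
  recv_no_address(&m1);
  std::printf("no-address handler: name_len=%d\n", m1.name_len);    // 0
  MsgHdr m2; m2.name = &a;
  recv_with_address(&m2);
  std::printf("with-address handler: name_len=%d\n", m2.name_len);  // 16
  return 0;
}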
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int StreamTcpPacket (ThreadVars *tv, Packet *p, StreamTcpThread *stt,
PacketQueue *pq)
{
SCEnter();
DEBUG_ASSERT_FLOW_LOCKED(p->flow);
SCLogDebug("p->pcap_cnt %"PRIu64, p->pcap_cnt);
/* assign the thread id to the flow */
if (unlikely(p->flow->thread_id == 0)) {
p->flow->thread_id = (FlowThreadId)tv->id;
#ifdef DEBUG
} else if (unlikely((FlowThreadId)tv->id != p->flow->thread_id)) {
SCLogDebug("wrong thread: flow has %u, we are %d", p->flow->thread_id, tv->id);
#endif
}
TcpSession *ssn = (TcpSession *)p->flow->protoctx;
/* track TCP flags */
if (ssn != NULL) {
ssn->tcp_packet_flags |= p->tcph->th_flags;
if (PKT_IS_TOSERVER(p))
ssn->client.tcp_flags |= p->tcph->th_flags;
else if (PKT_IS_TOCLIENT(p))
ssn->server.tcp_flags |= p->tcph->th_flags;
/* check if we need to unset the ASYNC flag */
if (ssn->flags & STREAMTCP_FLAG_ASYNC &&
ssn->client.tcp_flags != 0 &&
ssn->server.tcp_flags != 0)
{
SCLogDebug("ssn %p: removing ASYNC flag as we have packets on both sides", ssn);
ssn->flags &= ~STREAMTCP_FLAG_ASYNC;
}
}
/* update counters */
if ((p->tcph->th_flags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) {
StatsIncr(tv, stt->counter_tcp_synack);
} else if (p->tcph->th_flags & (TH_SYN)) {
StatsIncr(tv, stt->counter_tcp_syn);
}
if (p->tcph->th_flags & (TH_RST)) {
StatsIncr(tv, stt->counter_tcp_rst);
}
/* broken TCP http://ask.wireshark.org/questions/3183/acknowledgment-number-broken-tcp-the-acknowledge-field-is-nonzero-while-the-ack-flag-is-not-set */
if (!(p->tcph->th_flags & TH_ACK) && TCP_GET_ACK(p) != 0) {
StreamTcpSetEvent(p, STREAM_PKT_BROKEN_ACK);
}
/* If we are on IPS mode, and got a drop action triggered from
* the IP only module, or from a reassembled msg and/or from an
* applayer detection, then drop the rest of the packets of the
* same stream and avoid inspecting it any further */
if (StreamTcpCheckFlowDrops(p) == 1) {
SCLogDebug("This flow/stream triggered a drop rule");
FlowSetNoPacketInspectionFlag(p->flow);
DecodeSetNoPacketInspectionFlag(p);
StreamTcpDisableAppLayer(p->flow);
PACKET_DROP(p);
/* return the segments to the pool */
StreamTcpSessionPktFree(p);
SCReturnInt(0);
}
if (ssn == NULL || ssn->state == TCP_NONE) {
if (StreamTcpPacketStateNone(tv, p, stt, ssn, &stt->pseudo_queue) == -1) {
goto error;
}
if (ssn != NULL)
SCLogDebug("ssn->alproto %"PRIu16"", p->flow->alproto);
} else {
/* special case for PKT_PSEUDO_STREAM_END packets:
* bypass the state handling and various packet checks,
* we care about reassembly here. */
if (p->flags & PKT_PSEUDO_STREAM_END) {
if (PKT_IS_TOCLIENT(p)) {
ssn->client.last_ack = TCP_GET_ACK(p);
StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
&ssn->server, p, pq);
} else {
ssn->server.last_ack = TCP_GET_ACK(p);
StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
&ssn->client, p, pq);
}
/* straight to 'skip' as we already handled reassembly */
goto skip;
}
/* check if the packet is in right direction, when we missed the
SYN packet and picked up midstream session. */
if (ssn->flags & STREAMTCP_FLAG_MIDSTREAM_SYNACK)
StreamTcpPacketSwitchDir(ssn, p);
if (StreamTcpPacketIsKeepAlive(ssn, p) == 1) {
goto skip;
}
if (StreamTcpPacketIsKeepAliveACK(ssn, p) == 1) {
StreamTcpClearKeepAliveFlag(ssn, p);
goto skip;
}
StreamTcpClearKeepAliveFlag(ssn, p);
/* if packet is not a valid window update, check if it is perhaps
* a bad window update that we should ignore (and alert on) */
if (StreamTcpPacketIsFinShutdownAck(ssn, p) == 0)
if (StreamTcpPacketIsWindowUpdate(ssn, p) == 0)
if (StreamTcpPacketIsBadWindowUpdate(ssn,p))
goto skip;
switch (ssn->state) {
case TCP_SYN_SENT:
if(StreamTcpPacketStateSynSent(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_SYN_RECV:
if(StreamTcpPacketStateSynRecv(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_ESTABLISHED:
if(StreamTcpPacketStateEstablished(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_FIN_WAIT1:
if(StreamTcpPacketStateFinWait1(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_FIN_WAIT2:
if(StreamTcpPacketStateFinWait2(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_CLOSING:
if(StreamTcpPacketStateClosing(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_CLOSE_WAIT:
if(StreamTcpPacketStateCloseWait(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_LAST_ACK:
if(StreamTcpPacketStateLastAck(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_TIME_WAIT:
if(StreamTcpPacketStateTimeWait(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_CLOSED:
/* TCP session memory is not returned to pool until timeout. */
SCLogDebug("packet received on closed state");
break;
default:
SCLogDebug("packet received on default state");
break;
}
skip:
if (ssn->state >= TCP_ESTABLISHED) {
p->flags |= PKT_STREAM_EST;
}
}
/* deal with a pseudo packet that is created upon receiving a RST
* segment. To be sure we process both sides of the connection, we
* inject a fake packet into the system, forcing reassembly of the
* opposing direction.
* There should be only one, but to be sure we do a while loop. */
if (ssn != NULL) {
while (stt->pseudo_queue.len > 0) {
SCLogDebug("processing pseudo packet / stream end");
Packet *np = PacketDequeue(&stt->pseudo_queue);
if (np != NULL) {
/* process the opposing direction of the original packet */
if (PKT_IS_TOSERVER(np)) {
SCLogDebug("pseudo packet is to server");
StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
&ssn->client, np, NULL);
} else {
SCLogDebug("pseudo packet is to client");
StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
&ssn->server, np, NULL);
}
/* enqueue this packet so we inspect it in detect etc */
PacketEnqueue(pq, np);
}
SCLogDebug("processing pseudo packet / stream end done");
}
/* recalc the csum on the packet if it was modified */
if (p->flags & PKT_STREAM_MODIFIED) {
ReCalculateChecksum(p);
}
/* check for conditions that may make us not want to log this packet */
/* streams that hit depth */
if ((ssn->client.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED) &&
(ssn->server.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED))
{
/* we can call bypass callback, if enabled */
if (StreamTcpBypassEnabled()) {
PacketBypassCallback(p);
}
}
if ((ssn->client.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED) ||
(ssn->server.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED))
{
p->flags |= PKT_STREAM_NOPCAPLOG;
}
/* encrypted packets */
if ((PKT_IS_TOSERVER(p) && (ssn->client.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY)) ||
(PKT_IS_TOCLIENT(p) && (ssn->server.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY)))
{
p->flags |= PKT_STREAM_NOPCAPLOG;
}
if (ssn->flags & STREAMTCP_FLAG_BYPASS) {
/* we can call bypass callback, if enabled */
if (StreamTcpBypassEnabled()) {
PacketBypassCallback(p);
}
/* if stream is dead and we have no detect engine at all, bypass. */
} else if (g_detect_disabled &&
(ssn->client.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY) &&
(ssn->server.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY) &&
StreamTcpBypassEnabled())
{
SCLogDebug("bypass as stream is dead and we have no rules");
PacketBypassCallback(p);
}
}
SCReturnInt(0);
error:
/* make sure we don't leave packets in our pseudo queue */
while (stt->pseudo_queue.len > 0) {
Packet *np = PacketDequeue(&stt->pseudo_queue);
if (np != NULL) {
PacketEnqueue(pq, np);
}
}
/* recalc the csum on the packet if it was modified */
if (p->flags & PKT_STREAM_MODIFIED) {
ReCalculateChecksum(p);
}
if (StreamTcpInlineDropInvalid()) {
PACKET_DROP(p);
}
SCReturnInt(-1);
}
Commit Message: stream: still inspect packets dropped by stream
The detect engine would bypass packets that are set as dropped. This
seems sane, as these packets are going to be dropped anyway.
However, it led to the following corner case: stream events that
triggered the drop could not be matched on the rules. The packet
with the event wouldn't make it to the detect engine due to the bypass.
This patch changes the logic to not bypass DROP packets anymore.
Packets that are dropped by the stream engine will set the no payload
inspection flag, so avoid needless cost.
CWE ID: CWE-693 | int StreamTcpPacket (ThreadVars *tv, Packet *p, StreamTcpThread *stt,
PacketQueue *pq)
{
SCEnter();
DEBUG_ASSERT_FLOW_LOCKED(p->flow);
SCLogDebug("p->pcap_cnt %"PRIu64, p->pcap_cnt);
/* assign the thread id to the flow */
if (unlikely(p->flow->thread_id == 0)) {
p->flow->thread_id = (FlowThreadId)tv->id;
#ifdef DEBUG
} else if (unlikely((FlowThreadId)tv->id != p->flow->thread_id)) {
SCLogDebug("wrong thread: flow has %u, we are %d", p->flow->thread_id, tv->id);
#endif
}
TcpSession *ssn = (TcpSession *)p->flow->protoctx;
/* track TCP flags */
if (ssn != NULL) {
ssn->tcp_packet_flags |= p->tcph->th_flags;
if (PKT_IS_TOSERVER(p))
ssn->client.tcp_flags |= p->tcph->th_flags;
else if (PKT_IS_TOCLIENT(p))
ssn->server.tcp_flags |= p->tcph->th_flags;
/* check if we need to unset the ASYNC flag */
if (ssn->flags & STREAMTCP_FLAG_ASYNC &&
ssn->client.tcp_flags != 0 &&
ssn->server.tcp_flags != 0)
{
SCLogDebug("ssn %p: removing ASYNC flag as we have packets on both sides", ssn);
ssn->flags &= ~STREAMTCP_FLAG_ASYNC;
}
}
/* update counters */
if ((p->tcph->th_flags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) {
StatsIncr(tv, stt->counter_tcp_synack);
} else if (p->tcph->th_flags & (TH_SYN)) {
StatsIncr(tv, stt->counter_tcp_syn);
}
if (p->tcph->th_flags & (TH_RST)) {
StatsIncr(tv, stt->counter_tcp_rst);
}
/* broken TCP http://ask.wireshark.org/questions/3183/acknowledgment-number-broken-tcp-the-acknowledge-field-is-nonzero-while-the-ack-flag-is-not-set */
if (!(p->tcph->th_flags & TH_ACK) && TCP_GET_ACK(p) != 0) {
StreamTcpSetEvent(p, STREAM_PKT_BROKEN_ACK);
}
/* If we are on IPS mode, and got a drop action triggered from
* the IP only module, or from a reassembled msg and/or from an
* applayer detection, then drop the rest of the packets of the
* same stream and avoid inspecting it any further */
if (StreamTcpCheckFlowDrops(p) == 1) {
SCLogDebug("This flow/stream triggered a drop rule");
FlowSetNoPacketInspectionFlag(p->flow);
DecodeSetNoPacketInspectionFlag(p);
StreamTcpDisableAppLayer(p->flow);
PACKET_DROP(p);
/* return the segments to the pool */
StreamTcpSessionPktFree(p);
SCReturnInt(0);
}
if (ssn == NULL || ssn->state == TCP_NONE) {
if (StreamTcpPacketStateNone(tv, p, stt, ssn, &stt->pseudo_queue) == -1) {
goto error;
}
if (ssn != NULL)
SCLogDebug("ssn->alproto %"PRIu16"", p->flow->alproto);
} else {
/* special case for PKT_PSEUDO_STREAM_END packets:
* bypass the state handling and various packet checks,
* we care about reassembly here. */
if (p->flags & PKT_PSEUDO_STREAM_END) {
if (PKT_IS_TOCLIENT(p)) {
ssn->client.last_ack = TCP_GET_ACK(p);
StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
&ssn->server, p, pq);
} else {
ssn->server.last_ack = TCP_GET_ACK(p);
StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
&ssn->client, p, pq);
}
/* straight to 'skip' as we already handled reassembly */
goto skip;
}
/* check if the packet is in right direction, when we missed the
SYN packet and picked up midstream session. */
if (ssn->flags & STREAMTCP_FLAG_MIDSTREAM_SYNACK)
StreamTcpPacketSwitchDir(ssn, p);
if (StreamTcpPacketIsKeepAlive(ssn, p) == 1) {
goto skip;
}
if (StreamTcpPacketIsKeepAliveACK(ssn, p) == 1) {
StreamTcpClearKeepAliveFlag(ssn, p);
goto skip;
}
StreamTcpClearKeepAliveFlag(ssn, p);
/* if packet is not a valid window update, check if it is perhaps
* a bad window update that we should ignore (and alert on) */
if (StreamTcpPacketIsFinShutdownAck(ssn, p) == 0)
if (StreamTcpPacketIsWindowUpdate(ssn, p) == 0)
if (StreamTcpPacketIsBadWindowUpdate(ssn,p))
goto skip;
switch (ssn->state) {
case TCP_SYN_SENT:
if(StreamTcpPacketStateSynSent(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_SYN_RECV:
if(StreamTcpPacketStateSynRecv(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_ESTABLISHED:
if(StreamTcpPacketStateEstablished(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_FIN_WAIT1:
if(StreamTcpPacketStateFinWait1(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_FIN_WAIT2:
if(StreamTcpPacketStateFinWait2(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_CLOSING:
if(StreamTcpPacketStateClosing(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_CLOSE_WAIT:
if(StreamTcpPacketStateCloseWait(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_LAST_ACK:
if(StreamTcpPacketStateLastAck(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_TIME_WAIT:
if(StreamTcpPacketStateTimeWait(tv, p, stt, ssn, &stt->pseudo_queue)) {
goto error;
}
break;
case TCP_CLOSED:
/* TCP session memory is not returned to pool until timeout. */
SCLogDebug("packet received on closed state");
break;
default:
SCLogDebug("packet received on default state");
break;
}
skip:
if (ssn->state >= TCP_ESTABLISHED) {
p->flags |= PKT_STREAM_EST;
}
}
/* deal with a pseudo packet that is created upon receiving a RST
* segment. To be sure we process both sides of the connection, we
* inject a fake packet into the system, forcing reassembly of the
* opposing direction.
* There should be only one, but to be sure we do a while loop. */
if (ssn != NULL) {
while (stt->pseudo_queue.len > 0) {
SCLogDebug("processing pseudo packet / stream end");
Packet *np = PacketDequeue(&stt->pseudo_queue);
if (np != NULL) {
/* process the opposing direction of the original packet */
if (PKT_IS_TOSERVER(np)) {
SCLogDebug("pseudo packet is to server");
StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
&ssn->client, np, NULL);
} else {
SCLogDebug("pseudo packet is to client");
StreamTcpReassembleHandleSegment(tv, stt->ra_ctx, ssn,
&ssn->server, np, NULL);
}
/* enqueue this packet so we inspect it in detect etc */
PacketEnqueue(pq, np);
}
SCLogDebug("processing pseudo packet / stream end done");
}
/* recalc the csum on the packet if it was modified */
if (p->flags & PKT_STREAM_MODIFIED) {
ReCalculateChecksum(p);
}
/* check for conditions that may make us not want to log this packet */
/* streams that hit depth */
if ((ssn->client.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED) &&
(ssn->server.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED))
{
/* we can call bypass callback, if enabled */
if (StreamTcpBypassEnabled()) {
PacketBypassCallback(p);
}
}
if ((ssn->client.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED) ||
(ssn->server.flags & STREAMTCP_STREAM_FLAG_DEPTH_REACHED))
{
p->flags |= PKT_STREAM_NOPCAPLOG;
}
/* encrypted packets */
if ((PKT_IS_TOSERVER(p) && (ssn->client.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY)) ||
(PKT_IS_TOCLIENT(p) && (ssn->server.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY)))
{
p->flags |= PKT_STREAM_NOPCAPLOG;
}
if (ssn->flags & STREAMTCP_FLAG_BYPASS) {
/* we can call bypass callback, if enabled */
if (StreamTcpBypassEnabled()) {
PacketBypassCallback(p);
}
/* if stream is dead and we have no detect engine at all, bypass. */
} else if (g_detect_disabled &&
(ssn->client.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY) &&
(ssn->server.flags & STREAMTCP_STREAM_FLAG_NOREASSEMBLY) &&
StreamTcpBypassEnabled())
{
SCLogDebug("bypass as stream is dead and we have no rules");
PacketBypassCallback(p);
}
}
SCReturnInt(0);
error:
/* make sure we don't leave packets in our pseudo queue */
while (stt->pseudo_queue.len > 0) {
Packet *np = PacketDequeue(&stt->pseudo_queue);
if (np != NULL) {
PacketEnqueue(pq, np);
}
}
/* recalc the csum on the packet if it was modified */
if (p->flags & PKT_STREAM_MODIFIED) {
ReCalculateChecksum(p);
}
if (StreamTcpInlineDropInvalid()) {
/* disable payload inspection as we're dropping this packet
* anyway. Doesn't disable all detection, so we can still
* match on the stream event that was set. */
DecodeSetNoPayloadInspectionFlag(p);
PACKET_DROP(p);
}
SCReturnInt(-1);
}
| 169,333 |
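The Suricata change above keeps dropped packets visible to the detect engine and only disables payload inspection, so rules can still match the stream event that triggered the drop. A tiny C++ sketch of that flag-then-drop ordering follows; the flag names and Packet structure are invented, not Suricata's real macros.

#include <cstdint>
#include <cstdio>

// Invented packet flags; not Suricata's real constants.
constexpr std::uint32_t FLAG_DROP = 1u << 0;
constexpr std::uint32_t FLAG_NO_PAYLOAD_INSPECT = 1u << 1;

struct Packet { std::uint32_t flags = 0; };

// Invalid segment in inline mode: disable payload inspection, then drop,
// but do NOT short-circuit detection entirely.
static void handle_invalid_inline(Packet *p) {
  p->flags |= FLAG_NO_PAYLOAD_INSPECT;
  p->flags |= FLAG_DROP;
}

static void detect(const Packet &p) {
  // Stream-event rules still run; payload rules check the flag first.
  bool payload_rules = !(p.flags & FLAG_NO_PAYLOAD_INSPECT);
  std::printf("event rules: yes, payload rules: %s\n",
              payload_rules ? "yes" : "no");
}

int main() {
  Packet p;
  handle_invalid_inline(&p);
  detect(p);  // "event rules: yes, payload rules: no"
  return 0;
}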
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: TIFFFlushData1(TIFF* tif)
{
if (tif->tif_rawcc > 0 && tif->tif_flags & TIFF_BUF4WRITE ) {
if (!isFillOrder(tif, tif->tif_dir.td_fillorder) &&
(tif->tif_flags & TIFF_NOBITREV) == 0)
TIFFReverseBits((uint8*)tif->tif_rawdata,
tif->tif_rawcc);
if (!TIFFAppendToStrip(tif,
isTiled(tif) ? tif->tif_curtile : tif->tif_curstrip,
tif->tif_rawdata, tif->tif_rawcc))
return (0);
tif->tif_rawcc = 0;
tif->tif_rawcp = tif->tif_rawdata;
}
return (1);
}
Commit Message: * tools/tiffcrop.c: fix various out-of-bounds write vulnerabilities
in heap or stack allocated buffers. Reported as MSVR 35093,
MSVR 35096 and MSVR 35097. Discovered by Axel Souchet and Vishal
Chauhan from the MSRC Vulnerabilities & Mitigations team.
* tools/tiff2pdf.c: fix out-of-bounds write vulnerabilities in
heap allocate buffer in t2p_process_jpeg_strip(). Reported as MSVR
35098. Discovered by Axel Souchet and Vishal Chauhan from the MSRC
Vulnerabilities & Mitigations team.
* libtiff/tif_pixarlog.c: fix out-of-bounds write vulnerabilities
in heap allocated buffers. Reported as MSVR 35094. Discovered by
Axel Souchet and Vishal Chauhan from the MSRC Vulnerabilities &
Mitigations team.
* libtiff/tif_write.c: fix issue in error code path of TIFFFlushData1()
that didn't reset the tif_rawcc and tif_rawcp members. I'm not
completely sure if that could happen in practice outside of the odd
behaviour of t2p_seekproc() of tiff2pdf). The report points that a
better fix could be to check the return value of TIFFFlushData1() in
places where it isn't done currently, but it seems this patch is enough.
Reported as MSVR 35095. Discovered by Axel Souchet & Vishal Chauhan &
Suha Can from the MSRC Vulnerabilities & Mitigations team.
CWE ID: CWE-787 | TIFFFlushData1(TIFF* tif)
{
if (tif->tif_rawcc > 0 && tif->tif_flags & TIFF_BUF4WRITE ) {
if (!isFillOrder(tif, tif->tif_dir.td_fillorder) &&
(tif->tif_flags & TIFF_NOBITREV) == 0)
TIFFReverseBits((uint8*)tif->tif_rawdata,
tif->tif_rawcc);
if (!TIFFAppendToStrip(tif,
isTiled(tif) ? tif->tif_curtile : tif->tif_curstrip,
tif->tif_rawdata, tif->tif_rawcc))
{
/* We update those variables even in case of error since there's */
/* code that doesn't really check the return code of this */
/* function */
tif->tif_rawcc = 0;
tif->tif_rawcp = tif->tif_rawdata;
return (0);
}
tif->tif_rawcc = 0;
tif->tif_rawcp = tif->tif_rawdata;
}
return (1);
}
| 166,871 |
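The point of the libtiff change above is that a failed TIFFAppendToStrip must still leave the raw-buffer cursor in a sane state, because some callers ignore TIFFFlushData1's return value and keep appending. A minimal standalone sketch of that pattern (hypothetical names, not the libtiff API):

#include <cstddef>
#include <cstdint>

struct Writer {
    uint8_t buf[256];
    size_t  used = 0;          // bytes currently queued in buf
};

// Stand-in for a backend write that can fail.
static bool backend_write(const uint8_t*, size_t) { return false; }

static bool flush(Writer& w) {
    if (w.used == 0)
        return true;
    bool ok = backend_write(w.buf, w.used);
    // Reset the cursor even on failure: a caller that ignores `ok` and keeps
    // appending can no longer run the count past the end of `buf`.
    w.used = 0;
    return ok;
}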
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool WebMediaPlayerMS::DidGetOpaqueResponseFromServiceWorker() const {
DCHECK(thread_checker_.CalledOnValidThread());
return false;
}
Commit Message: Simplify "WouldTaintOrigin" concept in media/blink
Currently WebMediaPlayer has three predicates:
- DidGetOpaqueResponseFromServiceWorker
- HasSingleSecurityOrigin
- DidPassCORSAccessCheck
. These are used to determine whether the response body is available
for scripts. They are known to be confusing, and actually
MediaElementAudioSourceHandler::WouldTaintOrigin misuses them.
This CL merges the three predicates to one, WouldTaintOrigin, to remove
the confusion. Now the "response type" concept is available and we
don't need a custom CORS check, so this CL removes
BaseAudioContext::WouldTaintOrigin. This CL also renames
URLData::has_opaque_data_ and its (direct and indirect) data accessors
to match the spec.
Bug: 849942, 875153
Change-Id: I6acf50169d7445c4ff614e80ac606f79ee577d2a
Reviewed-on: https://chromium-review.googlesource.com/c/1238098
Reviewed-by: Fredrik Hubinette <[email protected]>
Reviewed-by: Kinuko Yasuda <[email protected]>
Reviewed-by: Raymond Toy <[email protected]>
Commit-Queue: Yutaka Hirano <[email protected]>
Cr-Commit-Position: refs/heads/master@{#598258}
CWE ID: CWE-732 | bool WebMediaPlayerMS::DidGetOpaqueResponseFromServiceWorker() const {
bool WebMediaPlayerMS::WouldTaintOrigin() const {
DCHECK(thread_checker_.CalledOnValidThread());
return false;
}
| 172,620 |
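The refactor described in the commit above replaces three easily-confused predicates with the single question callers actually need answered. A self-contained illustration of that API shape (hypothetical types, not the Chromium interfaces):

#include <iostream>

enum class ResponseType { kBasic, kCors, kOpaque };

class MediaResource {
 public:
    explicit MediaResource(ResponseType type) : type_(type) {}

    // One predicate instead of juggling HasSingleSecurityOrigin,
    // DidPassCORSAccessCheck and DidGetOpaqueResponseFromServiceWorker.
    bool WouldTaintOrigin() const { return type_ == ResponseType::kOpaque; }

 private:
    ResponseType type_;
};

int main() {
    MediaResource opaque(ResponseType::kOpaque);
    std::cout << std::boolalpha << opaque.WouldTaintOrigin() << "\n";  // true
}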
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void DocumentLoader::InstallNewDocument(
const KURL& url,
Document* owner_document,
bool should_reuse_default_view,
const AtomicString& mime_type,
const AtomicString& encoding,
InstallNewDocumentReason reason,
ParserSynchronizationPolicy parsing_policy,
const KURL& overriding_url) {
DCHECK(!frame_->GetDocument() || !frame_->GetDocument()->IsActive());
DCHECK_EQ(frame_->Tree().ChildCount(), 0u);
if (GetFrameLoader().StateMachine()->IsDisplayingInitialEmptyDocument()) {
GetFrameLoader().StateMachine()->AdvanceTo(
FrameLoaderStateMachine::kCommittedFirstRealLoad);
}
SecurityOrigin* previous_security_origin = nullptr;
if (frame_->GetDocument())
previous_security_origin = frame_->GetDocument()->GetSecurityOrigin();
if (!should_reuse_default_view)
frame_->SetDOMWindow(LocalDOMWindow::Create(*frame_));
bool user_gesture_bit_set = frame_->HasReceivedUserGesture() ||
frame_->HasReceivedUserGestureBeforeNavigation();
if (reason == InstallNewDocumentReason::kNavigation)
WillCommitNavigation();
Document* document = frame_->DomWindow()->InstallNewDocument(
mime_type,
DocumentInit::Create()
.WithFrame(frame_)
.WithURL(url)
.WithOwnerDocument(owner_document)
.WithNewRegistrationContext(),
false);
if (user_gesture_bit_set) {
frame_->SetDocumentHasReceivedUserGestureBeforeNavigation(
ShouldPersistUserGestureValue(previous_security_origin,
document->GetSecurityOrigin()));
if (frame_->IsMainFrame())
frame_->ClearDocumentHasReceivedUserGesture();
}
if (ShouldClearWindowName(*frame_, previous_security_origin, *document)) {
frame_->Tree().ExperimentalSetNulledName();
}
frame_->GetPage()->GetChromeClient().InstallSupplements(*frame_);
if (!overriding_url.IsEmpty())
document->SetBaseURLOverride(overriding_url);
DidInstallNewDocument(document);
if (reason == InstallNewDocumentReason::kNavigation)
DidCommitNavigation();
writer_ =
DocumentWriter::Create(document, parsing_policy, mime_type, encoding);
document->SetFeaturePolicy(
RuntimeEnabledFeatures::FeaturePolicyExperimentalFeaturesEnabled()
? response_.HttpHeaderField(HTTPNames::Feature_Policy)
: g_empty_string);
GetFrameLoader().DispatchDidClearDocumentOfWindowObject();
}
Commit Message: Inherit CSP when we inherit the security origin
This prevents attacks that use main window navigation to get out of the
existing csp constraints such as the related bug
Bug: 747847
Change-Id: I1e57b50da17f65d38088205b0a3c7c49ef2ae4d8
Reviewed-on: https://chromium-review.googlesource.com/592027
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Andy Paicu <[email protected]>
Cr-Commit-Position: refs/heads/master@{#492333}
CWE ID: CWE-732 | void DocumentLoader::InstallNewDocument(
const KURL& url,
Document* owner_document,
bool should_reuse_default_view,
const AtomicString& mime_type,
const AtomicString& encoding,
InstallNewDocumentReason reason,
ParserSynchronizationPolicy parsing_policy,
const KURL& overriding_url) {
DCHECK(!frame_->GetDocument() || !frame_->GetDocument()->IsActive());
DCHECK_EQ(frame_->Tree().ChildCount(), 0u);
if (GetFrameLoader().StateMachine()->IsDisplayingInitialEmptyDocument()) {
GetFrameLoader().StateMachine()->AdvanceTo(
FrameLoaderStateMachine::kCommittedFirstRealLoad);
}
SecurityOrigin* previous_security_origin = nullptr;
if (frame_->GetDocument())
previous_security_origin = frame_->GetDocument()->GetSecurityOrigin();
if (!should_reuse_default_view)
frame_->SetDOMWindow(LocalDOMWindow::Create(*frame_));
bool user_gesture_bit_set = frame_->HasReceivedUserGesture() ||
frame_->HasReceivedUserGestureBeforeNavigation();
if (reason == InstallNewDocumentReason::kNavigation)
WillCommitNavigation();
Document* document = frame_->DomWindow()->InstallNewDocument(
mime_type,
DocumentInit::Create()
.WithFrame(frame_)
.WithURL(url)
.WithOwnerDocument(owner_document)
.WithNewRegistrationContext(),
false);
if (user_gesture_bit_set) {
frame_->SetDocumentHasReceivedUserGestureBeforeNavigation(
ShouldPersistUserGestureValue(previous_security_origin,
document->GetSecurityOrigin()));
if (frame_->IsMainFrame())
frame_->ClearDocumentHasReceivedUserGesture();
}
if (ShouldClearWindowName(*frame_, previous_security_origin, *document)) {
frame_->Tree().ExperimentalSetNulledName();
}
frame_->GetPage()->GetChromeClient().InstallSupplements(*frame_);
if (!overriding_url.IsEmpty())
document->SetBaseURLOverride(overriding_url);
DidInstallNewDocument(document, reason);
if (reason == InstallNewDocumentReason::kNavigation)
DidCommitNavigation();
writer_ =
DocumentWriter::Create(document, parsing_policy, mime_type, encoding);
document->SetFeaturePolicy(
RuntimeEnabledFeatures::FeaturePolicyExperimentalFeaturesEnabled()
? response_.HttpHeaderField(HTTPNames::Feature_Policy)
: g_empty_string);
GetFrameLoader().DispatchDidClearDocumentOfWindowObject();
}
| 172,303 |
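The rule behind the commit above is that a document which inherits its creator's security origin (an about:blank main-window navigation, for instance) must inherit the creator's Content Security Policy with it; otherwise the navigation sheds the policy. A self-contained sketch of that invariant (hypothetical types, not Blink code):

#include <memory>
#include <string>
#include <vector>

struct Document {
    std::string origin;
    std::vector<std::string> csp_policies;   // serialized policy headers
};

std::unique_ptr<Document> CreateChild(const Document& creator, bool inherits_origin) {
    auto doc = std::make_unique<Document>();
    if (inherits_origin) {
        doc->origin       = creator.origin;        // same origin as the creator...
        doc->csp_policies = creator.csp_policies;  // ...so the same CSP constraints
    }
    return doc;
}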
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: TEE_Result syscall_cryp_obj_populate(unsigned long obj,
struct utee_attribute *usr_attrs,
unsigned long attr_count)
{
TEE_Result res;
struct tee_ta_session *sess;
struct tee_obj *o;
const struct tee_cryp_obj_type_props *type_props;
TEE_Attribute *attrs = NULL;
res = tee_ta_get_current_session(&sess);
if (res != TEE_SUCCESS)
return res;
res = tee_obj_get(to_user_ta_ctx(sess->ctx),
tee_svc_uref_to_vaddr(obj), &o);
if (res != TEE_SUCCESS)
return res;
/* Must be a transient object */
if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0)
return TEE_ERROR_BAD_PARAMETERS;
/* Must not be initialized already */
if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0)
return TEE_ERROR_BAD_PARAMETERS;
type_props = tee_svc_find_type_props(o->info.objectType);
if (!type_props)
return TEE_ERROR_NOT_IMPLEMENTED;
attrs = malloc(sizeof(TEE_Attribute) * attr_count);
if (!attrs)
return TEE_ERROR_OUT_OF_MEMORY;
res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count,
attrs);
if (res != TEE_SUCCESS)
goto out;
res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props,
attrs, attr_count);
if (res != TEE_SUCCESS)
goto out;
res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count);
if (res == TEE_SUCCESS)
o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED;
out:
free(attrs);
return res;
}
Commit Message: svc: check for allocation overflow in syscall_cryp_obj_populate
Without checking for overflow there is a risk of allocating a buffer
with size smaller than anticipated and as a consequence of that it might
lead to a heap based overflow with attacker controlled data written
outside the boundaries of the buffer.
Fixes: OP-TEE-2018-0009: "Integer overflow in crypto system calls"
Signed-off-by: Joakim Bech <[email protected]>
Tested-by: Joakim Bech <[email protected]> (QEMU v7, v8)
Reviewed-by: Jens Wiklander <[email protected]>
Reported-by: Riscure <[email protected]>
Reported-by: Alyssa Milburn <[email protected]>
Acked-by: Etienne Carriere <[email protected]>
CWE ID: CWE-119 | TEE_Result syscall_cryp_obj_populate(unsigned long obj,
struct utee_attribute *usr_attrs,
unsigned long attr_count)
{
TEE_Result res;
struct tee_ta_session *sess;
struct tee_obj *o;
const struct tee_cryp_obj_type_props *type_props;
TEE_Attribute *attrs = NULL;
res = tee_ta_get_current_session(&sess);
if (res != TEE_SUCCESS)
return res;
res = tee_obj_get(to_user_ta_ctx(sess->ctx),
tee_svc_uref_to_vaddr(obj), &o);
if (res != TEE_SUCCESS)
return res;
/* Must be a transient object */
if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0)
return TEE_ERROR_BAD_PARAMETERS;
/* Must not be initialized already */
if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0)
return TEE_ERROR_BAD_PARAMETERS;
type_props = tee_svc_find_type_props(o->info.objectType);
if (!type_props)
return TEE_ERROR_NOT_IMPLEMENTED;
size_t alloc_size = 0;
if (MUL_OVERFLOW(sizeof(TEE_Attribute), attr_count, &alloc_size))
return TEE_ERROR_OVERFLOW;
attrs = malloc(alloc_size);
if (!attrs)
return TEE_ERROR_OUT_OF_MEMORY;
res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count,
attrs);
if (res != TEE_SUCCESS)
goto out;
res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props,
attrs, attr_count);
if (res != TEE_SUCCESS)
goto out;
res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count);
if (res == TEE_SUCCESS)
o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED;
out:
free(attrs);
return res;
}
| 169,469 |
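The guard added above, in isolation: reject the multiplication before it reaches the allocator, so attr_count can never shrink the allocation by wrapping. A minimal C++ sketch using the GCC/Clang builtin (OP-TEE's MUL_OVERFLOW macro performs the equivalent check):

#include <cstdlib>

// Returns nullptr instead of under-allocating when elem_size * count wraps.
void* alloc_array(std::size_t elem_size, std::size_t count) {
    std::size_t bytes = 0;
    if (__builtin_mul_overflow(elem_size, count, &bytes))
        return nullptr;
    return std::malloc(bytes);
}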
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void SSLManager::OnSSLCertificateError(
base::WeakPtr<SSLErrorHandler::Delegate> delegate,
const content::GlobalRequestID& id,
const ResourceType::Type resource_type,
const GURL& url,
int render_process_id,
int render_view_id,
const net::SSLInfo& ssl_info,
bool fatal) {
DCHECK(delegate);
DVLOG(1) << "OnSSLCertificateError() cert_error: "
<< net::MapCertStatusToNetError(ssl_info.cert_status)
<< " id: " << id.child_id << "," << id.request_id
<< " resource_type: " << resource_type
<< " url: " << url.spec()
<< " render_process_id: " << render_process_id
<< " render_view_id: " << render_view_id
<< " cert_status: " << std::hex << ssl_info.cert_status;
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::Bind(&SSLCertErrorHandler::Dispatch,
new SSLCertErrorHandler(delegate,
id,
resource_type,
url,
render_process_id,
render_view_id,
ssl_info,
fatal)));
}
Commit Message: Inherits SupportsWeakPtr<T> instead of having WeakPtrFactory<T>
This change refines r137676.
BUG=122654
TEST=browser_test
Review URL: https://chromiumcodereview.appspot.com/10332233
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139771 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | void SSLManager::OnSSLCertificateError(
const base::WeakPtr<SSLErrorHandler::Delegate>& delegate,
const content::GlobalRequestID& id,
const ResourceType::Type resource_type,
const GURL& url,
int render_process_id,
int render_view_id,
const net::SSLInfo& ssl_info,
bool fatal) {
DCHECK(delegate);
DVLOG(1) << "OnSSLCertificateError() cert_error: "
<< net::MapCertStatusToNetError(ssl_info.cert_status)
<< " id: " << id.child_id << "," << id.request_id
<< " resource_type: " << resource_type
<< " url: " << url.spec()
<< " render_process_id: " << render_process_id
<< " render_view_id: " << render_view_id
<< " cert_status: " << std::hex << ssl_info.cert_status;
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::Bind(&SSLCertErrorHandler::Dispatch,
new SSLCertErrorHandler(delegate,
id,
resource_type,
url,
render_process_id,
render_view_id,
ssl_info,
fatal)));
}
| 170,996 |
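The signature change above keeps handing the error handler a weak reference to its delegate (now passed by const reference), so the delegate's liveness is re-checked at use time rather than assumed. A standalone illustration of that discipline with std::weak_ptr — not Chromium's base::WeakPtr, just the same idea:

#include <iostream>
#include <memory>

struct Delegate {
    void OnCertError() { std::cout << "handled\n"; }
};

void DispatchError(const std::weak_ptr<Delegate>& weak_delegate) {
    if (auto delegate = weak_delegate.lock())   // still alive?
        delegate->OnCertError();
    // otherwise the event is dropped instead of touching freed memory
}

int main() {
    auto delegate = std::make_shared<Delegate>();
    std::weak_ptr<Delegate> weak = delegate;
    DispatchError(weak);   // prints "handled"
    delegate.reset();      // delegate destroyed before the next dispatch
    DispatchError(weak);   // lock() fails, nothing is dereferenced
}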
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: gss_process_context_token (minor_status,
context_handle,
token_buffer)
OM_uint32 * minor_status;
gss_ctx_id_t context_handle;
gss_buffer_t token_buffer;
{
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
if (minor_status == NULL)
return (GSS_S_CALL_INACCESSIBLE_WRITE);
*minor_status = 0;
if (context_handle == GSS_C_NO_CONTEXT)
return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT);
if (token_buffer == GSS_C_NO_BUFFER)
return (GSS_S_CALL_INACCESSIBLE_READ);
if (GSS_EMPTY_BUFFER(token_buffer))
return (GSS_S_CALL_INACCESSIBLE_READ);
/*
* select the approprate underlying mechanism routine and
* call it.
*/
ctx = (gss_union_ctx_id_t) context_handle;
mech = gssint_get_mechanism (ctx->mech_type);
if (mech) {
if (mech->gss_process_context_token) {
status = mech->gss_process_context_token(
minor_status,
ctx->internal_ctx_id,
token_buffer);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
} else
status = GSS_S_UNAVAILABLE;
return(status);
}
return (GSS_S_BAD_MECH);
}
Commit Message: Preserve GSS context on init/accept failure
After gss_init_sec_context() or gss_accept_sec_context() has created a
context, don't delete the mechglue context on failures from subsequent
calls, even if the mechanism deletes the mech-specific context (which
is allowed by RFC 2744 but not preferred). Check for union contexts
with no mechanism context in each GSS function which accepts a
gss_ctx_id_t.
CVE-2017-11462:
RFC 2744 permits a GSS-API implementation to delete an existing
security context on a second or subsequent call to
gss_init_sec_context() or gss_accept_sec_context() if the call results
in an error. This API behavior has been found to be dangerous,
leading to the possibility of memory errors in some callers. For
safety, GSS-API implementations should instead preserve existing
security contexts on error until the caller deletes them.
All versions of MIT krb5 prior to this change may delete acceptor
contexts on error. Versions 1.13.4 through 1.13.7, 1.14.1 through
1.14.5, and 1.15 through 1.15.1 may also delete initiator contexts on
error.
ticket: 8598 (new)
target_version: 1.15-next
target_version: 1.14-next
tags: pullup
CWE ID: CWE-415 | gss_process_context_token (minor_status,
context_handle,
token_buffer)
OM_uint32 * minor_status;
gss_ctx_id_t context_handle;
gss_buffer_t token_buffer;
{
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
if (minor_status == NULL)
return (GSS_S_CALL_INACCESSIBLE_WRITE);
*minor_status = 0;
if (context_handle == GSS_C_NO_CONTEXT)
return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT);
if (token_buffer == GSS_C_NO_BUFFER)
return (GSS_S_CALL_INACCESSIBLE_READ);
if (GSS_EMPTY_BUFFER(token_buffer))
return (GSS_S_CALL_INACCESSIBLE_READ);
/*
* select the approprate underlying mechanism routine and
* call it.
*/
ctx = (gss_union_ctx_id_t) context_handle;
if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT)
return (GSS_S_NO_CONTEXT);
mech = gssint_get_mechanism (ctx->mech_type);
if (mech) {
if (mech->gss_process_context_token) {
status = mech->gss_process_context_token(
minor_status,
ctx->internal_ctx_id,
token_buffer);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
} else
status = GSS_S_UNAVAILABLE;
return(status);
}
return (GSS_S_BAD_MECH);
}
| 168,019 |
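The defensive check added above treats a union context whose mechanism-specific context is gone (after a failed init/accept that the caller has not yet cleaned up) as "no context" instead of dereferencing it. A hypothetical, self-contained sketch of that shape:

struct InnerCtx { int state; };

// The wrapper may outlive its inner context after a failed establish step.
struct UnionCtx { InnerCtx* inner; };

int process_token(UnionCtx* ctx) {
    if (ctx == nullptr)
        return -1;              // no handle at all
    if (ctx->inner == nullptr)
        return -2;              // wrapper kept, mechanism context already gone
    ctx->inner->state++;        // only now is it safe to use
    return 0;
}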
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: jp2_box_t *jp2_box_create(int type)
{
jp2_box_t *box;
jp2_boxinfo_t *boxinfo;
if (!(box = jas_malloc(sizeof(jp2_box_t)))) {
return 0;
}
memset(box, 0, sizeof(jp2_box_t));
box->type = type;
box->len = 0;
if (!(boxinfo = jp2_boxinfolookup(type))) {
return 0;
}
box->info = boxinfo;
box->ops = &boxinfo->ops;
return box;
}
Commit Message: Fixed bugs due to uninitialized data in the JP2 decoder.
Also, added some comments marking I/O stream interfaces that probably
need to be changed (in the long term) to fix integer overflow problems.
CWE ID: CWE-476 | jp2_box_t *jp2_box_create(int type)
jp2_box_t *jp2_box_create0()
{
jp2_box_t *box;
if (!(box = jas_malloc(sizeof(jp2_box_t)))) {
return 0;
}
memset(box, 0, sizeof(jp2_box_t));
box->type = 0;
box->len = 0;
// Mark the box data as never having been constructed
// so that we will not errantly attempt to destroy it later.
box->ops = &jp2_boxinfo_unk.ops;
return box;
}
jp2_box_t *jp2_box_create(int type)
{
jp2_box_t *box;
jp2_boxinfo_t *boxinfo;
if (!(box = jp2_box_create0())) {
return 0;
}
box->type = type;
box->len = 0;
if (!(boxinfo = jp2_boxinfolookup(type))) {
return 0;
}
box->info = boxinfo;
box->ops = &boxinfo->ops;
return box;
}
| 168,317 |
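The fix above works by giving the box a harmless ops table before anything can fail, so a later destroy call never dispatches through uninitialized memory. A sketch of that initialization order with hypothetical names (not the JasPer API):

#include <cstdlib>
#include <cstring>

struct Box;
struct BoxOps { void (*destroy)(Box*); };

static void destroy_noop(Box*) {}                    // safe default action
static const BoxOps kUnknownOps = { destroy_noop };

struct Box { int type; const BoxOps* ops; };

Box* box_create0() {
    Box* box = static_cast<Box*>(std::malloc(sizeof(Box)));
    if (!box)
        return nullptr;
    std::memset(box, 0, sizeof(*box));
    box->ops = &kUnknownOps;   // set before any fallible step, never left dangling
    return box;
}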
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: pgm_print(netdissect_options *ndo,
register const u_char *bp, register u_int length,
register const u_char *bp2)
{
register const struct pgm_header *pgm;
register const struct ip *ip;
register char ch;
uint16_t sport, dport;
u_int nla_afnum;
char nla_buf[INET6_ADDRSTRLEN];
register const struct ip6_hdr *ip6;
uint8_t opt_type, opt_len;
uint32_t seq, opts_len, len, offset;
pgm = (const struct pgm_header *)bp;
ip = (const struct ip *)bp2;
if (IP_V(ip) == 6)
ip6 = (const struct ip6_hdr *)bp2;
else
ip6 = NULL;
ch = '\0';
if (!ND_TTEST(pgm->pgm_dport)) {
if (ip6) {
ND_PRINT((ndo, "%s > %s: [|pgm]",
ip6addr_string(ndo, &ip6->ip6_src),
ip6addr_string(ndo, &ip6->ip6_dst)));
return;
} else {
ND_PRINT((ndo, "%s > %s: [|pgm]",
ipaddr_string(ndo, &ip->ip_src),
ipaddr_string(ndo, &ip->ip_dst)));
return;
}
}
sport = EXTRACT_16BITS(&pgm->pgm_sport);
dport = EXTRACT_16BITS(&pgm->pgm_dport);
if (ip6) {
if (ip6->ip6_nxt == IPPROTO_PGM) {
ND_PRINT((ndo, "%s.%s > %s.%s: ",
ip6addr_string(ndo, &ip6->ip6_src),
tcpport_string(ndo, sport),
ip6addr_string(ndo, &ip6->ip6_dst),
tcpport_string(ndo, dport)));
} else {
ND_PRINT((ndo, "%s > %s: ",
tcpport_string(ndo, sport), tcpport_string(ndo, dport)));
}
} else {
if (ip->ip_p == IPPROTO_PGM) {
ND_PRINT((ndo, "%s.%s > %s.%s: ",
ipaddr_string(ndo, &ip->ip_src),
tcpport_string(ndo, sport),
ipaddr_string(ndo, &ip->ip_dst),
tcpport_string(ndo, dport)));
} else {
ND_PRINT((ndo, "%s > %s: ",
tcpport_string(ndo, sport), tcpport_string(ndo, dport)));
}
}
ND_TCHECK(*pgm);
ND_PRINT((ndo, "PGM, length %u", EXTRACT_16BITS(&pgm->pgm_length)));
if (!ndo->ndo_vflag)
return;
ND_PRINT((ndo, " 0x%02x%02x%02x%02x%02x%02x ",
pgm->pgm_gsid[0],
pgm->pgm_gsid[1],
pgm->pgm_gsid[2],
pgm->pgm_gsid[3],
pgm->pgm_gsid[4],
pgm->pgm_gsid[5]));
switch (pgm->pgm_type) {
case PGM_SPM: {
const struct pgm_spm *spm;
spm = (const struct pgm_spm *)(pgm + 1);
ND_TCHECK(*spm);
bp = (const u_char *) (spm + 1);
switch (EXTRACT_16BITS(&spm->pgms_nla_afi)) {
case AFNUM_INET:
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in_addr);
break;
case AFNUM_INET6:
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
ND_PRINT((ndo, "SPM seq %u trail %u lead %u nla %s",
EXTRACT_32BITS(&spm->pgms_seq),
EXTRACT_32BITS(&spm->pgms_trailseq),
EXTRACT_32BITS(&spm->pgms_leadseq),
nla_buf));
break;
}
case PGM_POLL: {
const struct pgm_poll *poll_msg;
poll_msg = (const struct pgm_poll *)(pgm + 1);
ND_TCHECK(*poll_msg);
ND_PRINT((ndo, "POLL seq %u round %u",
EXTRACT_32BITS(&poll_msg->pgmp_seq),
EXTRACT_16BITS(&poll_msg->pgmp_round)));
bp = (const u_char *) (poll_msg + 1);
break;
}
case PGM_POLR: {
const struct pgm_polr *polr;
uint32_t ivl, rnd, mask;
polr = (const struct pgm_polr *)(pgm + 1);
ND_TCHECK(*polr);
bp = (const u_char *) (polr + 1);
switch (EXTRACT_16BITS(&polr->pgmp_nla_afi)) {
case AFNUM_INET:
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in_addr);
break;
case AFNUM_INET6:
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
ND_TCHECK2(*bp, sizeof(uint32_t));
ivl = EXTRACT_32BITS(bp);
bp += sizeof(uint32_t);
ND_TCHECK2(*bp, sizeof(uint32_t));
rnd = EXTRACT_32BITS(bp);
bp += sizeof(uint32_t);
ND_TCHECK2(*bp, sizeof(uint32_t));
mask = EXTRACT_32BITS(bp);
bp += sizeof(uint32_t);
ND_PRINT((ndo, "POLR seq %u round %u nla %s ivl %u rnd 0x%08x "
"mask 0x%08x", EXTRACT_32BITS(&polr->pgmp_seq),
EXTRACT_16BITS(&polr->pgmp_round), nla_buf, ivl, rnd, mask));
break;
}
case PGM_ODATA: {
const struct pgm_data *odata;
odata = (const struct pgm_data *)(pgm + 1);
ND_TCHECK(*odata);
ND_PRINT((ndo, "ODATA trail %u seq %u",
EXTRACT_32BITS(&odata->pgmd_trailseq),
EXTRACT_32BITS(&odata->pgmd_seq)));
bp = (const u_char *) (odata + 1);
break;
}
case PGM_RDATA: {
const struct pgm_data *rdata;
rdata = (const struct pgm_data *)(pgm + 1);
ND_TCHECK(*rdata);
ND_PRINT((ndo, "RDATA trail %u seq %u",
EXTRACT_32BITS(&rdata->pgmd_trailseq),
EXTRACT_32BITS(&rdata->pgmd_seq)));
bp = (const u_char *) (rdata + 1);
break;
}
case PGM_NAK:
case PGM_NULLNAK:
case PGM_NCF: {
const struct pgm_nak *nak;
char source_buf[INET6_ADDRSTRLEN], group_buf[INET6_ADDRSTRLEN];
nak = (const struct pgm_nak *)(pgm + 1);
ND_TCHECK(*nak);
bp = (const u_char *) (nak + 1);
/*
* Skip past the source, saving info along the way
* and stopping if we don't have enough.
*/
switch (EXTRACT_16BITS(&nak->pgmn_source_afi)) {
case AFNUM_INET:
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, source_buf, sizeof(source_buf));
bp += sizeof(struct in_addr);
break;
case AFNUM_INET6:
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, source_buf, sizeof(source_buf));
bp += sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
/*
* Skip past the group, saving info along the way
* and stopping if we don't have enough.
*/
bp += (2 * sizeof(uint16_t));
switch (EXTRACT_16BITS(bp)) {
case AFNUM_INET:
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, group_buf, sizeof(group_buf));
bp += sizeof(struct in_addr);
break;
case AFNUM_INET6:
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, group_buf, sizeof(group_buf));
bp += sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
/*
* Options decoding can go here.
*/
switch (pgm->pgm_type) {
case PGM_NAK:
ND_PRINT((ndo, "NAK "));
break;
case PGM_NULLNAK:
ND_PRINT((ndo, "NNAK "));
break;
case PGM_NCF:
ND_PRINT((ndo, "NCF "));
break;
default:
break;
}
ND_PRINT((ndo, "(%s -> %s), seq %u",
source_buf, group_buf, EXTRACT_32BITS(&nak->pgmn_seq)));
break;
}
case PGM_ACK: {
const struct pgm_ack *ack;
ack = (const struct pgm_ack *)(pgm + 1);
ND_TCHECK(*ack);
ND_PRINT((ndo, "ACK seq %u",
EXTRACT_32BITS(&ack->pgma_rx_max_seq)));
bp = (const u_char *) (ack + 1);
break;
}
case PGM_SPMR:
ND_PRINT((ndo, "SPMR"));
break;
default:
ND_PRINT((ndo, "UNKNOWN type 0x%02x", pgm->pgm_type));
break;
}
if (pgm->pgm_options & PGM_OPT_BIT_PRESENT) {
/*
* make sure there's enough for the first option header
*/
if (!ND_TTEST2(*bp, PGM_MIN_OPT_LEN)) {
ND_PRINT((ndo, "[|OPT]"));
return;
}
/*
* That option header MUST be an OPT_LENGTH option
* (see the first paragraph of section 9.1 in RFC 3208).
*/
opt_type = *bp++;
if ((opt_type & PGM_OPT_MASK) != PGM_OPT_LENGTH) {
ND_PRINT((ndo, "[First option bad, should be PGM_OPT_LENGTH, is %u]", opt_type & PGM_OPT_MASK));
return;
}
opt_len = *bp++;
if (opt_len != 4) {
ND_PRINT((ndo, "[Bad OPT_LENGTH option, length %u != 4]", opt_len));
return;
}
opts_len = EXTRACT_16BITS(bp);
if (opts_len < 4) {
ND_PRINT((ndo, "[Bad total option length %u < 4]", opts_len));
return;
}
bp += sizeof(uint16_t);
ND_PRINT((ndo, " OPTS LEN %d", opts_len));
opts_len -= 4;
while (opts_len) {
if (opts_len < PGM_MIN_OPT_LEN) {
ND_PRINT((ndo, "[Total option length leaves no room for final option]"));
return;
}
if (!ND_TTEST2(*bp, 2)) {
ND_PRINT((ndo, " [|OPT]"));
return;
}
opt_type = *bp++;
opt_len = *bp++;
if (opt_len < PGM_MIN_OPT_LEN) {
ND_PRINT((ndo, "[Bad option, length %u < %u]", opt_len,
PGM_MIN_OPT_LEN));
break;
}
if (opts_len < opt_len) {
ND_PRINT((ndo, "[Total option length leaves no room for final option]"));
return;
}
if (!ND_TTEST2(*bp, opt_len - 2)) {
ND_PRINT((ndo, " [|OPT]"));
return;
}
switch (opt_type & PGM_OPT_MASK) {
case PGM_OPT_LENGTH:
#define PGM_OPT_LENGTH_LEN (2+2)
if (opt_len != PGM_OPT_LENGTH_LEN) {
ND_PRINT((ndo, "[Bad OPT_LENGTH option, length %u != %u]",
opt_len, PGM_OPT_LENGTH_LEN));
return;
}
ND_PRINT((ndo, " OPTS LEN (extra?) %d", EXTRACT_16BITS(bp)));
bp += 2;
opts_len -= PGM_OPT_LENGTH_LEN;
break;
case PGM_OPT_FRAGMENT:
#define PGM_OPT_FRAGMENT_LEN (2+2+4+4+4)
if (opt_len != PGM_OPT_FRAGMENT_LEN) {
ND_PRINT((ndo, "[Bad OPT_FRAGMENT option, length %u != %u]",
opt_len, PGM_OPT_FRAGMENT_LEN));
return;
}
bp += 2;
seq = EXTRACT_32BITS(bp);
bp += 4;
offset = EXTRACT_32BITS(bp);
bp += 4;
len = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " FRAG seq %u off %u len %u", seq, offset, len));
opts_len -= PGM_OPT_FRAGMENT_LEN;
break;
case PGM_OPT_NAK_LIST:
bp += 2;
opt_len -= 4; /* option header */
ND_PRINT((ndo, " NAK LIST"));
while (opt_len) {
if (opt_len < 4) {
ND_PRINT((ndo, "[Option length not a multiple of 4]"));
return;
}
ND_TCHECK2(*bp, 4);
ND_PRINT((ndo, " %u", EXTRACT_32BITS(bp)));
bp += 4;
opt_len -= 4;
opts_len -= 4;
}
break;
case PGM_OPT_JOIN:
#define PGM_OPT_JOIN_LEN (2+2+4)
if (opt_len != PGM_OPT_JOIN_LEN) {
ND_PRINT((ndo, "[Bad OPT_JOIN option, length %u != %u]",
opt_len, PGM_OPT_JOIN_LEN));
return;
}
bp += 2;
seq = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " JOIN %u", seq));
opts_len -= PGM_OPT_JOIN_LEN;
break;
case PGM_OPT_NAK_BO_IVL:
#define PGM_OPT_NAK_BO_IVL_LEN (2+2+4+4)
if (opt_len != PGM_OPT_NAK_BO_IVL_LEN) {
ND_PRINT((ndo, "[Bad OPT_NAK_BO_IVL option, length %u != %u]",
opt_len, PGM_OPT_NAK_BO_IVL_LEN));
return;
}
bp += 2;
offset = EXTRACT_32BITS(bp);
bp += 4;
seq = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " BACKOFF ivl %u ivlseq %u", offset, seq));
opts_len -= PGM_OPT_NAK_BO_IVL_LEN;
break;
case PGM_OPT_NAK_BO_RNG:
#define PGM_OPT_NAK_BO_RNG_LEN (2+2+4+4)
if (opt_len != PGM_OPT_NAK_BO_RNG_LEN) {
ND_PRINT((ndo, "[Bad OPT_NAK_BO_RNG option, length %u != %u]",
opt_len, PGM_OPT_NAK_BO_RNG_LEN));
return;
}
bp += 2;
offset = EXTRACT_32BITS(bp);
bp += 4;
seq = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " BACKOFF max %u min %u", offset, seq));
opts_len -= PGM_OPT_NAK_BO_RNG_LEN;
break;
case PGM_OPT_REDIRECT:
#define PGM_OPT_REDIRECT_FIXED_LEN (2+2+2+2)
if (opt_len < PGM_OPT_REDIRECT_FIXED_LEN) {
ND_PRINT((ndo, "[Bad OPT_REDIRECT option, length %u < %u]",
opt_len, PGM_OPT_REDIRECT_FIXED_LEN));
return;
}
bp += 2;
nla_afnum = EXTRACT_16BITS(bp);
bp += 2+2;
switch (nla_afnum) {
case AFNUM_INET:
if (opt_len != PGM_OPT_REDIRECT_FIXED_LEN + sizeof(struct in_addr)) {
ND_PRINT((ndo, "[Bad OPT_REDIRECT option, length %u != %u + address size]",
opt_len, PGM_OPT_REDIRECT_FIXED_LEN));
return;
}
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in_addr);
opts_len -= PGM_OPT_REDIRECT_FIXED_LEN + sizeof(struct in_addr);
break;
case AFNUM_INET6:
if (opt_len != PGM_OPT_REDIRECT_FIXED_LEN + sizeof(struct in6_addr)) {
ND_PRINT((ndo, "[Bad OPT_REDIRECT option, length %u != %u + address size]",
PGM_OPT_REDIRECT_FIXED_LEN, opt_len));
return;
}
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in6_addr);
opts_len -= PGM_OPT_REDIRECT_FIXED_LEN + sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
ND_PRINT((ndo, " REDIRECT %s", nla_buf));
break;
case PGM_OPT_PARITY_PRM:
#define PGM_OPT_PARITY_PRM_LEN (2+2+4)
if (opt_len != PGM_OPT_PARITY_PRM_LEN) {
ND_PRINT((ndo, "[Bad OPT_PARITY_PRM option, length %u != %u]",
opt_len, PGM_OPT_PARITY_PRM_LEN));
return;
}
bp += 2;
len = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " PARITY MAXTGS %u", len));
opts_len -= PGM_OPT_PARITY_PRM_LEN;
break;
case PGM_OPT_PARITY_GRP:
#define PGM_OPT_PARITY_GRP_LEN (2+2+4)
if (opt_len != PGM_OPT_PARITY_GRP_LEN) {
ND_PRINT((ndo, "[Bad OPT_PARITY_GRP option, length %u != %u]",
opt_len, PGM_OPT_PARITY_GRP_LEN));
return;
}
bp += 2;
seq = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " PARITY GROUP %u", seq));
opts_len -= PGM_OPT_PARITY_GRP_LEN;
break;
case PGM_OPT_CURR_TGSIZE:
#define PGM_OPT_CURR_TGSIZE_LEN (2+2+4)
if (opt_len != PGM_OPT_CURR_TGSIZE_LEN) {
ND_PRINT((ndo, "[Bad OPT_CURR_TGSIZE option, length %u != %u]",
opt_len, PGM_OPT_CURR_TGSIZE_LEN));
return;
}
bp += 2;
len = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " PARITY ATGS %u", len));
opts_len -= PGM_OPT_CURR_TGSIZE_LEN;
break;
case PGM_OPT_NBR_UNREACH:
#define PGM_OPT_NBR_UNREACH_LEN (2+2)
if (opt_len != PGM_OPT_NBR_UNREACH_LEN) {
ND_PRINT((ndo, "[Bad OPT_NBR_UNREACH option, length %u != %u]",
opt_len, PGM_OPT_NBR_UNREACH_LEN));
return;
}
bp += 2;
ND_PRINT((ndo, " NBR_UNREACH"));
opts_len -= PGM_OPT_NBR_UNREACH_LEN;
break;
case PGM_OPT_PATH_NLA:
ND_PRINT((ndo, " PATH_NLA [%d]", opt_len));
bp += opt_len;
opts_len -= opt_len;
break;
case PGM_OPT_SYN:
#define PGM_OPT_SYN_LEN (2+2)
if (opt_len != PGM_OPT_SYN_LEN) {
ND_PRINT((ndo, "[Bad OPT_SYN option, length %u != %u]",
opt_len, PGM_OPT_SYN_LEN));
return;
}
bp += 2;
ND_PRINT((ndo, " SYN"));
opts_len -= PGM_OPT_SYN_LEN;
break;
case PGM_OPT_FIN:
#define PGM_OPT_FIN_LEN (2+2)
if (opt_len != PGM_OPT_FIN_LEN) {
ND_PRINT((ndo, "[Bad OPT_FIN option, length %u != %u]",
opt_len, PGM_OPT_FIN_LEN));
return;
}
bp += 2;
ND_PRINT((ndo, " FIN"));
opts_len -= PGM_OPT_FIN_LEN;
break;
case PGM_OPT_RST:
#define PGM_OPT_RST_LEN (2+2)
if (opt_len != PGM_OPT_RST_LEN) {
ND_PRINT((ndo, "[Bad OPT_RST option, length %u != %u]",
opt_len, PGM_OPT_RST_LEN));
return;
}
bp += 2;
ND_PRINT((ndo, " RST"));
opts_len -= PGM_OPT_RST_LEN;
break;
case PGM_OPT_CR:
ND_PRINT((ndo, " CR"));
bp += opt_len;
opts_len -= opt_len;
break;
case PGM_OPT_CRQST:
#define PGM_OPT_CRQST_LEN (2+2)
if (opt_len != PGM_OPT_CRQST_LEN) {
ND_PRINT((ndo, "[Bad OPT_CRQST option, length %u != %u]",
opt_len, PGM_OPT_CRQST_LEN));
return;
}
bp += 2;
ND_PRINT((ndo, " CRQST"));
opts_len -= PGM_OPT_CRQST_LEN;
break;
case PGM_OPT_PGMCC_DATA:
#define PGM_OPT_PGMCC_DATA_FIXED_LEN (2+2+4+2+2)
if (opt_len < PGM_OPT_PGMCC_DATA_FIXED_LEN) {
ND_PRINT((ndo, "[Bad OPT_PGMCC_DATA option, length %u < %u]",
opt_len, PGM_OPT_PGMCC_DATA_FIXED_LEN));
return;
}
bp += 2;
offset = EXTRACT_32BITS(bp);
bp += 4;
nla_afnum = EXTRACT_16BITS(bp);
bp += 2+2;
switch (nla_afnum) {
case AFNUM_INET:
if (opt_len != PGM_OPT_PGMCC_DATA_FIXED_LEN + sizeof(struct in_addr)) {
ND_PRINT((ndo, "[Bad OPT_PGMCC_DATA option, length %u != %u + address size]",
opt_len, PGM_OPT_PGMCC_DATA_FIXED_LEN));
return;
}
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in_addr);
opts_len -= PGM_OPT_PGMCC_DATA_FIXED_LEN + sizeof(struct in_addr);
break;
case AFNUM_INET6:
if (opt_len != PGM_OPT_PGMCC_DATA_FIXED_LEN + sizeof(struct in6_addr)) {
ND_PRINT((ndo, "[Bad OPT_PGMCC_DATA option, length %u != %u + address size]",
opt_len, PGM_OPT_PGMCC_DATA_FIXED_LEN));
return;
}
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in6_addr);
opts_len -= PGM_OPT_PGMCC_DATA_FIXED_LEN + sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
ND_PRINT((ndo, " PGMCC DATA %u %s", offset, nla_buf));
break;
case PGM_OPT_PGMCC_FEEDBACK:
#define PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN (2+2+4+2+2)
if (opt_len < PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN) {
ND_PRINT((ndo, "[Bad PGM_OPT_PGMCC_FEEDBACK option, length %u < %u]",
opt_len, PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN));
return;
}
bp += 2;
offset = EXTRACT_32BITS(bp);
bp += 4;
nla_afnum = EXTRACT_16BITS(bp);
bp += 2+2;
switch (nla_afnum) {
case AFNUM_INET:
if (opt_len != PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN + sizeof(struct in_addr)) {
ND_PRINT((ndo, "[Bad OPT_PGMCC_FEEDBACK option, length %u != %u + address size]",
opt_len, PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN));
return;
}
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in_addr);
opts_len -= PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN + sizeof(struct in_addr);
break;
case AFNUM_INET6:
if (opt_len != PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN + sizeof(struct in6_addr)) {
ND_PRINT((ndo, "[Bad OPT_PGMCC_FEEDBACK option, length %u != %u + address size]",
opt_len, PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN));
return;
}
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in6_addr);
opts_len -= PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN + sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
ND_PRINT((ndo, " PGMCC FEEDBACK %u %s", offset, nla_buf));
break;
default:
ND_PRINT((ndo, " OPT_%02X [%d] ", opt_type, opt_len));
bp += opt_len;
opts_len -= opt_len;
break;
}
if (opt_type & PGM_OPT_END)
break;
}
}
ND_PRINT((ndo, " [%u]", length));
if (ndo->ndo_packettype == PT_PGM_ZMTP1 &&
(pgm->pgm_type == PGM_ODATA || pgm->pgm_type == PGM_RDATA))
zmtp1_print_datagram(ndo, bp, EXTRACT_16BITS(&pgm->pgm_length));
return;
trunc:
ND_PRINT((ndo, "[|pgm]"));
if (ch != '\0')
ND_PRINT((ndo, ">"));
}
Commit Message: CVE-2017-13034/PGM: Add a bounds check.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s), modified
so the capture file won't be rejected as an invalid capture.
Move a return to make the code a bit cleaner (i.e., make it more obvious
that if we don't have enough of the PGM header, we just print the source
and destination IP addresses, mark it as incomplete PGM, and don't try
to look at the PGM header).
CWE ID: CWE-125 | pgm_print(netdissect_options *ndo,
register const u_char *bp, register u_int length,
register const u_char *bp2)
{
register const struct pgm_header *pgm;
register const struct ip *ip;
register char ch;
uint16_t sport, dport;
u_int nla_afnum;
char nla_buf[INET6_ADDRSTRLEN];
register const struct ip6_hdr *ip6;
uint8_t opt_type, opt_len;
uint32_t seq, opts_len, len, offset;
pgm = (const struct pgm_header *)bp;
ip = (const struct ip *)bp2;
if (IP_V(ip) == 6)
ip6 = (const struct ip6_hdr *)bp2;
else
ip6 = NULL;
ch = '\0';
if (!ND_TTEST(pgm->pgm_dport)) {
if (ip6) {
ND_PRINT((ndo, "%s > %s: [|pgm]",
ip6addr_string(ndo, &ip6->ip6_src),
ip6addr_string(ndo, &ip6->ip6_dst)));
} else {
ND_PRINT((ndo, "%s > %s: [|pgm]",
ipaddr_string(ndo, &ip->ip_src),
ipaddr_string(ndo, &ip->ip_dst)));
}
return;
}
sport = EXTRACT_16BITS(&pgm->pgm_sport);
dport = EXTRACT_16BITS(&pgm->pgm_dport);
if (ip6) {
if (ip6->ip6_nxt == IPPROTO_PGM) {
ND_PRINT((ndo, "%s.%s > %s.%s: ",
ip6addr_string(ndo, &ip6->ip6_src),
tcpport_string(ndo, sport),
ip6addr_string(ndo, &ip6->ip6_dst),
tcpport_string(ndo, dport)));
} else {
ND_PRINT((ndo, "%s > %s: ",
tcpport_string(ndo, sport), tcpport_string(ndo, dport)));
}
} else {
if (ip->ip_p == IPPROTO_PGM) {
ND_PRINT((ndo, "%s.%s > %s.%s: ",
ipaddr_string(ndo, &ip->ip_src),
tcpport_string(ndo, sport),
ipaddr_string(ndo, &ip->ip_dst),
tcpport_string(ndo, dport)));
} else {
ND_PRINT((ndo, "%s > %s: ",
tcpport_string(ndo, sport), tcpport_string(ndo, dport)));
}
}
ND_TCHECK(*pgm);
ND_PRINT((ndo, "PGM, length %u", EXTRACT_16BITS(&pgm->pgm_length)));
if (!ndo->ndo_vflag)
return;
ND_PRINT((ndo, " 0x%02x%02x%02x%02x%02x%02x ",
pgm->pgm_gsid[0],
pgm->pgm_gsid[1],
pgm->pgm_gsid[2],
pgm->pgm_gsid[3],
pgm->pgm_gsid[4],
pgm->pgm_gsid[5]));
switch (pgm->pgm_type) {
case PGM_SPM: {
const struct pgm_spm *spm;
spm = (const struct pgm_spm *)(pgm + 1);
ND_TCHECK(*spm);
bp = (const u_char *) (spm + 1);
switch (EXTRACT_16BITS(&spm->pgms_nla_afi)) {
case AFNUM_INET:
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in_addr);
break;
case AFNUM_INET6:
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
ND_PRINT((ndo, "SPM seq %u trail %u lead %u nla %s",
EXTRACT_32BITS(&spm->pgms_seq),
EXTRACT_32BITS(&spm->pgms_trailseq),
EXTRACT_32BITS(&spm->pgms_leadseq),
nla_buf));
break;
}
case PGM_POLL: {
const struct pgm_poll *poll_msg;
poll_msg = (const struct pgm_poll *)(pgm + 1);
ND_TCHECK(*poll_msg);
ND_PRINT((ndo, "POLL seq %u round %u",
EXTRACT_32BITS(&poll_msg->pgmp_seq),
EXTRACT_16BITS(&poll_msg->pgmp_round)));
bp = (const u_char *) (poll_msg + 1);
break;
}
case PGM_POLR: {
const struct pgm_polr *polr;
uint32_t ivl, rnd, mask;
polr = (const struct pgm_polr *)(pgm + 1);
ND_TCHECK(*polr);
bp = (const u_char *) (polr + 1);
switch (EXTRACT_16BITS(&polr->pgmp_nla_afi)) {
case AFNUM_INET:
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in_addr);
break;
case AFNUM_INET6:
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
ND_TCHECK2(*bp, sizeof(uint32_t));
ivl = EXTRACT_32BITS(bp);
bp += sizeof(uint32_t);
ND_TCHECK2(*bp, sizeof(uint32_t));
rnd = EXTRACT_32BITS(bp);
bp += sizeof(uint32_t);
ND_TCHECK2(*bp, sizeof(uint32_t));
mask = EXTRACT_32BITS(bp);
bp += sizeof(uint32_t);
ND_PRINT((ndo, "POLR seq %u round %u nla %s ivl %u rnd 0x%08x "
"mask 0x%08x", EXTRACT_32BITS(&polr->pgmp_seq),
EXTRACT_16BITS(&polr->pgmp_round), nla_buf, ivl, rnd, mask));
break;
}
case PGM_ODATA: {
const struct pgm_data *odata;
odata = (const struct pgm_data *)(pgm + 1);
ND_TCHECK(*odata);
ND_PRINT((ndo, "ODATA trail %u seq %u",
EXTRACT_32BITS(&odata->pgmd_trailseq),
EXTRACT_32BITS(&odata->pgmd_seq)));
bp = (const u_char *) (odata + 1);
break;
}
case PGM_RDATA: {
const struct pgm_data *rdata;
rdata = (const struct pgm_data *)(pgm + 1);
ND_TCHECK(*rdata);
ND_PRINT((ndo, "RDATA trail %u seq %u",
EXTRACT_32BITS(&rdata->pgmd_trailseq),
EXTRACT_32BITS(&rdata->pgmd_seq)));
bp = (const u_char *) (rdata + 1);
break;
}
case PGM_NAK:
case PGM_NULLNAK:
case PGM_NCF: {
const struct pgm_nak *nak;
char source_buf[INET6_ADDRSTRLEN], group_buf[INET6_ADDRSTRLEN];
nak = (const struct pgm_nak *)(pgm + 1);
ND_TCHECK(*nak);
bp = (const u_char *) (nak + 1);
/*
* Skip past the source, saving info along the way
* and stopping if we don't have enough.
*/
switch (EXTRACT_16BITS(&nak->pgmn_source_afi)) {
case AFNUM_INET:
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, source_buf, sizeof(source_buf));
bp += sizeof(struct in_addr);
break;
case AFNUM_INET6:
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, source_buf, sizeof(source_buf));
bp += sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
/*
* Skip past the group, saving info along the way
* and stopping if we don't have enough.
*/
bp += (2 * sizeof(uint16_t));
ND_TCHECK_16BITS(bp);
switch (EXTRACT_16BITS(bp)) {
case AFNUM_INET:
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, group_buf, sizeof(group_buf));
bp += sizeof(struct in_addr);
break;
case AFNUM_INET6:
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, group_buf, sizeof(group_buf));
bp += sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
/*
* Options decoding can go here.
*/
switch (pgm->pgm_type) {
case PGM_NAK:
ND_PRINT((ndo, "NAK "));
break;
case PGM_NULLNAK:
ND_PRINT((ndo, "NNAK "));
break;
case PGM_NCF:
ND_PRINT((ndo, "NCF "));
break;
default:
break;
}
ND_PRINT((ndo, "(%s -> %s), seq %u",
source_buf, group_buf, EXTRACT_32BITS(&nak->pgmn_seq)));
break;
}
case PGM_ACK: {
const struct pgm_ack *ack;
ack = (const struct pgm_ack *)(pgm + 1);
ND_TCHECK(*ack);
ND_PRINT((ndo, "ACK seq %u",
EXTRACT_32BITS(&ack->pgma_rx_max_seq)));
bp = (const u_char *) (ack + 1);
break;
}
case PGM_SPMR:
ND_PRINT((ndo, "SPMR"));
break;
default:
ND_PRINT((ndo, "UNKNOWN type 0x%02x", pgm->pgm_type));
break;
}
if (pgm->pgm_options & PGM_OPT_BIT_PRESENT) {
/*
* make sure there's enough for the first option header
*/
if (!ND_TTEST2(*bp, PGM_MIN_OPT_LEN)) {
ND_PRINT((ndo, "[|OPT]"));
return;
}
/*
* That option header MUST be an OPT_LENGTH option
* (see the first paragraph of section 9.1 in RFC 3208).
*/
opt_type = *bp++;
if ((opt_type & PGM_OPT_MASK) != PGM_OPT_LENGTH) {
ND_PRINT((ndo, "[First option bad, should be PGM_OPT_LENGTH, is %u]", opt_type & PGM_OPT_MASK));
return;
}
opt_len = *bp++;
if (opt_len != 4) {
ND_PRINT((ndo, "[Bad OPT_LENGTH option, length %u != 4]", opt_len));
return;
}
opts_len = EXTRACT_16BITS(bp);
if (opts_len < 4) {
ND_PRINT((ndo, "[Bad total option length %u < 4]", opts_len));
return;
}
bp += sizeof(uint16_t);
ND_PRINT((ndo, " OPTS LEN %d", opts_len));
opts_len -= 4;
while (opts_len) {
if (opts_len < PGM_MIN_OPT_LEN) {
ND_PRINT((ndo, "[Total option length leaves no room for final option]"));
return;
}
if (!ND_TTEST2(*bp, 2)) {
ND_PRINT((ndo, " [|OPT]"));
return;
}
opt_type = *bp++;
opt_len = *bp++;
if (opt_len < PGM_MIN_OPT_LEN) {
ND_PRINT((ndo, "[Bad option, length %u < %u]", opt_len,
PGM_MIN_OPT_LEN));
break;
}
if (opts_len < opt_len) {
ND_PRINT((ndo, "[Total option length leaves no room for final option]"));
return;
}
if (!ND_TTEST2(*bp, opt_len - 2)) {
ND_PRINT((ndo, " [|OPT]"));
return;
}
switch (opt_type & PGM_OPT_MASK) {
case PGM_OPT_LENGTH:
#define PGM_OPT_LENGTH_LEN (2+2)
if (opt_len != PGM_OPT_LENGTH_LEN) {
ND_PRINT((ndo, "[Bad OPT_LENGTH option, length %u != %u]",
opt_len, PGM_OPT_LENGTH_LEN));
return;
}
ND_PRINT((ndo, " OPTS LEN (extra?) %d", EXTRACT_16BITS(bp)));
bp += 2;
opts_len -= PGM_OPT_LENGTH_LEN;
break;
case PGM_OPT_FRAGMENT:
#define PGM_OPT_FRAGMENT_LEN (2+2+4+4+4)
if (opt_len != PGM_OPT_FRAGMENT_LEN) {
ND_PRINT((ndo, "[Bad OPT_FRAGMENT option, length %u != %u]",
opt_len, PGM_OPT_FRAGMENT_LEN));
return;
}
bp += 2;
seq = EXTRACT_32BITS(bp);
bp += 4;
offset = EXTRACT_32BITS(bp);
bp += 4;
len = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " FRAG seq %u off %u len %u", seq, offset, len));
opts_len -= PGM_OPT_FRAGMENT_LEN;
break;
case PGM_OPT_NAK_LIST:
bp += 2;
opt_len -= 4; /* option header */
ND_PRINT((ndo, " NAK LIST"));
while (opt_len) {
if (opt_len < 4) {
ND_PRINT((ndo, "[Option length not a multiple of 4]"));
return;
}
ND_TCHECK2(*bp, 4);
ND_PRINT((ndo, " %u", EXTRACT_32BITS(bp)));
bp += 4;
opt_len -= 4;
opts_len -= 4;
}
break;
case PGM_OPT_JOIN:
#define PGM_OPT_JOIN_LEN (2+2+4)
if (opt_len != PGM_OPT_JOIN_LEN) {
ND_PRINT((ndo, "[Bad OPT_JOIN option, length %u != %u]",
opt_len, PGM_OPT_JOIN_LEN));
return;
}
bp += 2;
seq = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " JOIN %u", seq));
opts_len -= PGM_OPT_JOIN_LEN;
break;
case PGM_OPT_NAK_BO_IVL:
#define PGM_OPT_NAK_BO_IVL_LEN (2+2+4+4)
if (opt_len != PGM_OPT_NAK_BO_IVL_LEN) {
ND_PRINT((ndo, "[Bad OPT_NAK_BO_IVL option, length %u != %u]",
opt_len, PGM_OPT_NAK_BO_IVL_LEN));
return;
}
bp += 2;
offset = EXTRACT_32BITS(bp);
bp += 4;
seq = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " BACKOFF ivl %u ivlseq %u", offset, seq));
opts_len -= PGM_OPT_NAK_BO_IVL_LEN;
break;
case PGM_OPT_NAK_BO_RNG:
#define PGM_OPT_NAK_BO_RNG_LEN (2+2+4+4)
if (opt_len != PGM_OPT_NAK_BO_RNG_LEN) {
ND_PRINT((ndo, "[Bad OPT_NAK_BO_RNG option, length %u != %u]",
opt_len, PGM_OPT_NAK_BO_RNG_LEN));
return;
}
bp += 2;
offset = EXTRACT_32BITS(bp);
bp += 4;
seq = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " BACKOFF max %u min %u", offset, seq));
opts_len -= PGM_OPT_NAK_BO_RNG_LEN;
break;
case PGM_OPT_REDIRECT:
#define PGM_OPT_REDIRECT_FIXED_LEN (2+2+2+2)
if (opt_len < PGM_OPT_REDIRECT_FIXED_LEN) {
ND_PRINT((ndo, "[Bad OPT_REDIRECT option, length %u < %u]",
opt_len, PGM_OPT_REDIRECT_FIXED_LEN));
return;
}
bp += 2;
nla_afnum = EXTRACT_16BITS(bp);
bp += 2+2;
switch (nla_afnum) {
case AFNUM_INET:
if (opt_len != PGM_OPT_REDIRECT_FIXED_LEN + sizeof(struct in_addr)) {
ND_PRINT((ndo, "[Bad OPT_REDIRECT option, length %u != %u + address size]",
opt_len, PGM_OPT_REDIRECT_FIXED_LEN));
return;
}
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in_addr);
opts_len -= PGM_OPT_REDIRECT_FIXED_LEN + sizeof(struct in_addr);
break;
case AFNUM_INET6:
if (opt_len != PGM_OPT_REDIRECT_FIXED_LEN + sizeof(struct in6_addr)) {
ND_PRINT((ndo, "[Bad OPT_REDIRECT option, length %u != %u + address size]",
PGM_OPT_REDIRECT_FIXED_LEN, opt_len));
return;
}
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in6_addr);
opts_len -= PGM_OPT_REDIRECT_FIXED_LEN + sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
ND_PRINT((ndo, " REDIRECT %s", nla_buf));
break;
case PGM_OPT_PARITY_PRM:
#define PGM_OPT_PARITY_PRM_LEN (2+2+4)
if (opt_len != PGM_OPT_PARITY_PRM_LEN) {
ND_PRINT((ndo, "[Bad OPT_PARITY_PRM option, length %u != %u]",
opt_len, PGM_OPT_PARITY_PRM_LEN));
return;
}
bp += 2;
len = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " PARITY MAXTGS %u", len));
opts_len -= PGM_OPT_PARITY_PRM_LEN;
break;
case PGM_OPT_PARITY_GRP:
#define PGM_OPT_PARITY_GRP_LEN (2+2+4)
if (opt_len != PGM_OPT_PARITY_GRP_LEN) {
ND_PRINT((ndo, "[Bad OPT_PARITY_GRP option, length %u != %u]",
opt_len, PGM_OPT_PARITY_GRP_LEN));
return;
}
bp += 2;
seq = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " PARITY GROUP %u", seq));
opts_len -= PGM_OPT_PARITY_GRP_LEN;
break;
case PGM_OPT_CURR_TGSIZE:
#define PGM_OPT_CURR_TGSIZE_LEN (2+2+4)
if (opt_len != PGM_OPT_CURR_TGSIZE_LEN) {
ND_PRINT((ndo, "[Bad OPT_CURR_TGSIZE option, length %u != %u]",
opt_len, PGM_OPT_CURR_TGSIZE_LEN));
return;
}
bp += 2;
len = EXTRACT_32BITS(bp);
bp += 4;
ND_PRINT((ndo, " PARITY ATGS %u", len));
opts_len -= PGM_OPT_CURR_TGSIZE_LEN;
break;
case PGM_OPT_NBR_UNREACH:
#define PGM_OPT_NBR_UNREACH_LEN (2+2)
if (opt_len != PGM_OPT_NBR_UNREACH_LEN) {
ND_PRINT((ndo, "[Bad OPT_NBR_UNREACH option, length %u != %u]",
opt_len, PGM_OPT_NBR_UNREACH_LEN));
return;
}
bp += 2;
ND_PRINT((ndo, " NBR_UNREACH"));
opts_len -= PGM_OPT_NBR_UNREACH_LEN;
break;
case PGM_OPT_PATH_NLA:
ND_PRINT((ndo, " PATH_NLA [%d]", opt_len));
bp += opt_len;
opts_len -= opt_len;
break;
case PGM_OPT_SYN:
#define PGM_OPT_SYN_LEN (2+2)
if (opt_len != PGM_OPT_SYN_LEN) {
ND_PRINT((ndo, "[Bad OPT_SYN option, length %u != %u]",
opt_len, PGM_OPT_SYN_LEN));
return;
}
bp += 2;
ND_PRINT((ndo, " SYN"));
opts_len -= PGM_OPT_SYN_LEN;
break;
case PGM_OPT_FIN:
#define PGM_OPT_FIN_LEN (2+2)
if (opt_len != PGM_OPT_FIN_LEN) {
ND_PRINT((ndo, "[Bad OPT_FIN option, length %u != %u]",
opt_len, PGM_OPT_FIN_LEN));
return;
}
bp += 2;
ND_PRINT((ndo, " FIN"));
opts_len -= PGM_OPT_FIN_LEN;
break;
case PGM_OPT_RST:
#define PGM_OPT_RST_LEN (2+2)
if (opt_len != PGM_OPT_RST_LEN) {
ND_PRINT((ndo, "[Bad OPT_RST option, length %u != %u]",
opt_len, PGM_OPT_RST_LEN));
return;
}
bp += 2;
ND_PRINT((ndo, " RST"));
opts_len -= PGM_OPT_RST_LEN;
break;
case PGM_OPT_CR:
ND_PRINT((ndo, " CR"));
bp += opt_len;
opts_len -= opt_len;
break;
case PGM_OPT_CRQST:
#define PGM_OPT_CRQST_LEN (2+2)
if (opt_len != PGM_OPT_CRQST_LEN) {
ND_PRINT((ndo, "[Bad OPT_CRQST option, length %u != %u]",
opt_len, PGM_OPT_CRQST_LEN));
return;
}
bp += 2;
ND_PRINT((ndo, " CRQST"));
opts_len -= PGM_OPT_CRQST_LEN;
break;
case PGM_OPT_PGMCC_DATA:
#define PGM_OPT_PGMCC_DATA_FIXED_LEN (2+2+4+2+2)
if (opt_len < PGM_OPT_PGMCC_DATA_FIXED_LEN) {
ND_PRINT((ndo, "[Bad OPT_PGMCC_DATA option, length %u < %u]",
opt_len, PGM_OPT_PGMCC_DATA_FIXED_LEN));
return;
}
bp += 2;
offset = EXTRACT_32BITS(bp);
bp += 4;
nla_afnum = EXTRACT_16BITS(bp);
bp += 2+2;
switch (nla_afnum) {
case AFNUM_INET:
if (opt_len != PGM_OPT_PGMCC_DATA_FIXED_LEN + sizeof(struct in_addr)) {
ND_PRINT((ndo, "[Bad OPT_PGMCC_DATA option, length %u != %u + address size]",
opt_len, PGM_OPT_PGMCC_DATA_FIXED_LEN));
return;
}
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in_addr);
opts_len -= PGM_OPT_PGMCC_DATA_FIXED_LEN + sizeof(struct in_addr);
break;
case AFNUM_INET6:
if (opt_len != PGM_OPT_PGMCC_DATA_FIXED_LEN + sizeof(struct in6_addr)) {
ND_PRINT((ndo, "[Bad OPT_PGMCC_DATA option, length %u != %u + address size]",
opt_len, PGM_OPT_PGMCC_DATA_FIXED_LEN));
return;
}
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in6_addr);
opts_len -= PGM_OPT_PGMCC_DATA_FIXED_LEN + sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
ND_PRINT((ndo, " PGMCC DATA %u %s", offset, nla_buf));
break;
case PGM_OPT_PGMCC_FEEDBACK:
#define PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN (2+2+4+2+2)
if (opt_len < PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN) {
ND_PRINT((ndo, "[Bad PGM_OPT_PGMCC_FEEDBACK option, length %u < %u]",
opt_len, PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN));
return;
}
bp += 2;
offset = EXTRACT_32BITS(bp);
bp += 4;
nla_afnum = EXTRACT_16BITS(bp);
bp += 2+2;
switch (nla_afnum) {
case AFNUM_INET:
if (opt_len != PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN + sizeof(struct in_addr)) {
ND_PRINT((ndo, "[Bad OPT_PGMCC_FEEDBACK option, length %u != %u + address size]",
opt_len, PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN));
return;
}
ND_TCHECK2(*bp, sizeof(struct in_addr));
addrtostr(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in_addr);
opts_len -= PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN + sizeof(struct in_addr);
break;
case AFNUM_INET6:
if (opt_len != PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN + sizeof(struct in6_addr)) {
ND_PRINT((ndo, "[Bad OPT_PGMCC_FEEDBACK option, length %u != %u + address size]",
opt_len, PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN));
return;
}
ND_TCHECK2(*bp, sizeof(struct in6_addr));
addrtostr6(bp, nla_buf, sizeof(nla_buf));
bp += sizeof(struct in6_addr);
opts_len -= PGM_OPT_PGMCC_FEEDBACK_FIXED_LEN + sizeof(struct in6_addr);
break;
default:
goto trunc;
break;
}
ND_PRINT((ndo, " PGMCC FEEDBACK %u %s", offset, nla_buf));
break;
default:
ND_PRINT((ndo, " OPT_%02X [%d] ", opt_type, opt_len));
bp += opt_len;
opts_len -= opt_len;
break;
}
if (opt_type & PGM_OPT_END)
break;
}
}
ND_PRINT((ndo, " [%u]", length));
if (ndo->ndo_packettype == PT_PGM_ZMTP1 &&
(pgm->pgm_type == PGM_ODATA || pgm->pgm_type == PGM_RDATA))
zmtp1_print_datagram(ndo, bp, EXTRACT_16BITS(&pgm->pgm_length));
return;
trunc:
ND_PRINT((ndo, "[|pgm]"));
if (ch != '\0')
ND_PRINT((ndo, ">"));
}
| 167,849 |
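The one-line fix above (ND_TCHECK_16BITS before EXTRACT_16BITS) amounts to: prove two bytes remain in the captured data before reading them. The same guard as a standalone helper (hypothetical name, not the tcpdump macros):

#include <cstddef>
#include <cstdint>

// Reads a big-endian 16-bit field only if it lies within [p, end).
bool read_be16(const uint8_t* p, const uint8_t* end, uint16_t* out) {
    if (end - p < 2)
        return false;                                  // truncated capture: bail out
    *out = static_cast<uint16_t>((p[0] << 8) | p[1]);
    return true;
}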
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock)
{
struct grub_ext2_data *data = node->data;
struct grub_ext2_inode *inode = &node->inode;
int blknr = -1;
unsigned int blksz = EXT2_BLOCK_SIZE (data);
int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data);
if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG)
{
#ifndef _MSC_VER
char buf[EXT2_BLOCK_SIZE (data)];
#else
char * buf = grub_malloc (EXT2_BLOCK_SIZE(data));
#endif
struct grub_ext4_extent_header *leaf;
struct grub_ext4_extent *ext;
int i;
leaf = grub_ext4_find_leaf (data, buf,
(struct grub_ext4_extent_header *) inode->blocks.dir_blocks,
fileblock);
if (! leaf)
{
grub_error (GRUB_ERR_BAD_FS, "invalid extent");
return -1;
}
ext = (struct grub_ext4_extent *) (leaf + 1);
for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++)
{
if (fileblock < grub_le_to_cpu32 (ext[i].block))
break;
}
if (--i >= 0)
{
fileblock -= grub_le_to_cpu32 (ext[i].block);
if (fileblock >= grub_le_to_cpu16 (ext[i].len))
return 0;
else
{
grub_disk_addr_t start;
start = grub_le_to_cpu16 (ext[i].start_hi);
start = (start << 32) + grub_le_to_cpu32 (ext[i].start);
return fileblock + start;
}
}
else
{
grub_error (GRUB_ERR_BAD_FS, "something wrong with extent");
return -1;
}
}
/* Direct blocks. */
if (fileblock < INDIRECT_BLOCKS) {
blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]);
/* Indirect. */
} else if (fileblock < INDIRECT_BLOCKS + blksz / 4)
{
grub_uint32_t *indir;
indir = grub_malloc (blksz);
if (! indir)
return grub_errno;
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (inode->blocks.indir_block))
<< log2_blksz,
0, blksz, indir))
return grub_errno;
blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]);
grub_free (indir);
}
/* Double indirect. */
else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \
* (grub_disk_addr_t)(blksz / 4 + 1))
{
unsigned int perblock = blksz / 4;
unsigned int rblock = fileblock - (INDIRECT_BLOCKS
+ blksz / 4);
grub_uint32_t *indir;
indir = grub_malloc (blksz);
if (! indir)
return grub_errno;
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (inode->blocks.double_indir_block))
<< log2_blksz,
0, blksz, indir))
return grub_errno;
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (indir[rblock / perblock]))
<< log2_blksz,
0, blksz, indir))
return grub_errno;
blknr = grub_le_to_cpu32 (indir[rblock % perblock]);
grub_free (indir);
}
/* triple indirect. */
else
{
grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET,
"ext2fs doesn't support triple indirect blocks");
}
return blknr;
}
Commit Message: Fix #7723 - crash in ext2 GRUB code because of variable size array in stack
CWE ID: CWE-119 | grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock)
{
struct grub_ext2_data *data = node->data;
struct grub_ext2_inode *inode = &node->inode;
int blknr = -1;
unsigned int blksz = EXT2_BLOCK_SIZE (data);
int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data);
if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG)
{
char * buf = grub_malloc (EXT2_BLOCK_SIZE (data));
if (!buf) {
return -1;
}
struct grub_ext4_extent_header *leaf;
struct grub_ext4_extent *ext;
int i;
leaf = grub_ext4_find_leaf (data, buf,
(struct grub_ext4_extent_header *) inode->blocks.dir_blocks,
fileblock);
if (! leaf)
{
grub_error (GRUB_ERR_BAD_FS, "invalid extent");
free (buf);
return -1;
}
ext = (struct grub_ext4_extent *) (leaf + 1);
for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++)
{
if (fileblock < grub_le_to_cpu32 (ext[i].block))
break;
}
if (--i >= 0)
{
fileblock -= grub_le_to_cpu32 (ext[i].block);
if (fileblock >= grub_le_to_cpu16 (ext[i].len)) {
free (buf);
return 0;
} else
{
grub_disk_addr_t start;
start = grub_le_to_cpu16 (ext[i].start_hi);
start = (start << 32) + grub_le_to_cpu32 (ext[i].start);
free (buf);
return fileblock + start;
}
}
else
{
grub_error (GRUB_ERR_BAD_FS, "something wrong with extent");
free (buf);
return -1;
}
free (buf);
}
/* Direct blocks. */
if (fileblock < INDIRECT_BLOCKS) {
blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]);
/* Indirect. */
} else if (fileblock < INDIRECT_BLOCKS + blksz / 4)
{
grub_uint32_t *indir;
indir = grub_malloc (blksz);
if (! indir) {
return grub_errno;
}
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (inode->blocks.indir_block))
<< log2_blksz,
0, blksz, indir)) {
return grub_errno;
}
blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]);
grub_free (indir);
}
/* Double indirect. */
else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \
* (grub_disk_addr_t)(blksz / 4 + 1))
{
unsigned int perblock = blksz / 4;
unsigned int rblock = fileblock - (INDIRECT_BLOCKS
+ blksz / 4);
grub_uint32_t *indir;
indir = grub_malloc (blksz);
if (! indir) {
return grub_errno;
}
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (inode->blocks.double_indir_block))
<< log2_blksz,
0, blksz, indir)) {
return grub_errno;
}
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (indir[rblock / perblock]))
<< log2_blksz,
0, blksz, indir)) {
return grub_errno;
}
blknr = grub_le_to_cpu32 (indir[rblock % perblock]);
grub_free (indir);
}
/* triple indirect. */
else
{
grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET,
"ext2fs doesn't support triple indirect blocks");
}
return blknr;
}
| 168,087 |
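The fix in the record above replaces a stack variable-length array (char buf[EXT2_BLOCK_SIZE (data)]) with a heap allocation, so an attacker-influenced block size can no longer overflow the stack. A minimal, generic sketch of that remediation pattern follows; the function name and block_size parameter are illustrative stand-ins, not GRUB code, and libc malloc/free stand in for grub_malloc/grub_free. The essential points are the NULL check after allocation and a release on every exit path.

#include <stdlib.h>
#include <string.h>

/* Generic remediation for a stack VLA sized by untrusted data (CWE-119):
 * allocate the buffer on the heap, check the result, and free it on the
 * single cleanup path. block_size is assumed to have been validated
 * against a sane upper bound by the caller. */
int read_block_checked(size_t block_size)
{
    char *buf;
    int ret = -1;

    buf = malloc(block_size);
    if (buf == NULL)
        return -1;              /* allocation failure is recoverable */

    memset(buf, 0, block_size);
    /* ... parse the block into buf; on error, fall through to cleanup ... */
    ret = 0;

    free(buf);                  /* one exit path, so the buffer cannot leak */
    return ret;
}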
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: tt_cmap8_validate( FT_Byte* table,
FT_Validator valid )
{
FT_Byte* p = table + 4;
FT_Byte* is32;
FT_UInt32 length;
FT_UInt32 num_groups;
if ( table + 16 + 8192 > valid->limit )
FT_INVALID_TOO_SHORT;
length = TT_NEXT_ULONG( p );
if ( length > (FT_UInt32)( valid->limit - table ) || length < 8192 + 16 )
FT_INVALID_TOO_SHORT;
is32 = table + 12;
p = is32 + 8192; /* skip `is32' array */
num_groups = TT_NEXT_ULONG( p );
if ( p + num_groups * 12 > valid->limit )
FT_INVALID_TOO_SHORT;
/* check groups, they must be in increasing order */
FT_UInt32 n, start, end, start_id, count, last = 0;
for ( n = 0; n < num_groups; n++ )
{
FT_UInt hi, lo;
start = TT_NEXT_ULONG( p );
end = TT_NEXT_ULONG( p );
start_id = TT_NEXT_ULONG( p );
if ( start > end )
FT_INVALID_DATA;
if ( n > 0 && start <= last )
FT_INVALID_DATA;
if ( valid->level >= FT_VALIDATE_TIGHT )
{
if ( start_id + end - start >= TT_VALID_GLYPH_COUNT( valid ) )
FT_INVALID_GLYPH_ID;
count = (FT_UInt32)( end - start + 1 );
{
hi = (FT_UInt)( start >> 16 );
lo = (FT_UInt)( start & 0xFFFFU );
if ( (is32[hi >> 3] & ( 0x80 >> ( hi & 7 ) ) ) == 0 )
FT_INVALID_DATA;
if ( (is32[lo >> 3] & ( 0x80 >> ( lo & 7 ) ) ) == 0 )
FT_INVALID_DATA;
}
}
else
{
/* start_hi == 0; check that is32[i] is 0 for each i in */
/* the range [start..end] */
/* end_hi cannot be != 0! */
if ( end & ~0xFFFFU )
FT_INVALID_DATA;
for ( ; count > 0; count--, start++ )
{
lo = (FT_UInt)( start & 0xFFFFU );
if ( (is32[lo >> 3] & ( 0x80 >> ( lo & 7 ) ) ) != 0 )
FT_INVALID_DATA;
}
}
}
last = end;
}
Commit Message:
CWE ID: CWE-125 | tt_cmap8_validate( FT_Byte* table,
FT_Validator valid )
{
FT_Byte* p = table + 4;
FT_Byte* is32;
FT_UInt32 length;
FT_UInt32 num_groups;
if ( table + 16 + 8192 > valid->limit )
FT_INVALID_TOO_SHORT;
length = TT_NEXT_ULONG( p );
if ( length > (FT_UInt32)( valid->limit - table ) || length < 8192 + 16 )
FT_INVALID_TOO_SHORT;
is32 = table + 12;
p = is32 + 8192; /* skip `is32' array */
num_groups = TT_NEXT_ULONG( p );
/* p + num_groups * 12 > valid->limit ? */
if ( num_groups > (FT_UInt32)( valid->limit - p ) / 12 )
FT_INVALID_TOO_SHORT;
/* check groups, they must be in increasing order */
FT_UInt32 n, start, end, start_id, count, last = 0;
for ( n = 0; n < num_groups; n++ )
{
FT_UInt hi, lo;
start = TT_NEXT_ULONG( p );
end = TT_NEXT_ULONG( p );
start_id = TT_NEXT_ULONG( p );
if ( start > end )
FT_INVALID_DATA;
if ( n > 0 && start <= last )
FT_INVALID_DATA;
if ( valid->level >= FT_VALIDATE_TIGHT )
{
FT_UInt32 d = end - start;
/* start_id + end - start >= TT_VALID_GLYPH_COUNT( valid ) ? */
if ( d > TT_VALID_GLYPH_COUNT( valid ) ||
start_id >= TT_VALID_GLYPH_COUNT( valid ) - d )
FT_INVALID_GLYPH_ID;
count = (FT_UInt32)( end - start + 1 );
{
hi = (FT_UInt)( start >> 16 );
lo = (FT_UInt)( start & 0xFFFFU );
if ( (is32[hi >> 3] & ( 0x80 >> ( hi & 7 ) ) ) == 0 )
FT_INVALID_DATA;
if ( (is32[lo >> 3] & ( 0x80 >> ( lo & 7 ) ) ) == 0 )
FT_INVALID_DATA;
}
}
else
{
/* start_hi == 0; check that is32[i] is 0 for each i in */
/* the range [start..end] */
/* end_hi cannot be != 0! */
if ( end & ~0xFFFFU )
FT_INVALID_DATA;
for ( ; count > 0; count--, start++ )
{
lo = (FT_UInt)( start & 0xFFFFU );
if ( (is32[lo >> 3] & ( 0x80 >> ( lo & 7 ) ) ) != 0 )
FT_INVALID_DATA;
}
}
}
last = end;
}
| 164,845 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
LutContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
uint8_t *inrow, *outrow, *inrow0, *outrow0;
int i, j, plane, direct = 0;
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
if (s->is_rgb) {
/* packed */
inrow0 = in ->data[0];
outrow0 = out->data[0];
for (i = 0; i < in->height; i ++) {
int w = inlink->w;
const uint8_t (*tab)[256] = (const uint8_t (*)[256])s->lut;
inrow = inrow0;
outrow = outrow0;
for (j = 0; j < w; j++) {
switch (s->step) {
case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
default: outrow[0] = tab[0][inrow[0]];
}
outrow += s->step;
inrow += s->step;
}
inrow0 += in ->linesize[0];
outrow0 += out->linesize[0];
}
} else {
/* planar */
for (plane = 0; plane < 4 && in->data[plane]; plane++) {
int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
int h = FF_CEIL_RSHIFT(inlink->h, vsub);
int w = FF_CEIL_RSHIFT(inlink->w, hsub);
inrow = in ->data[plane];
outrow = out->data[plane];
for (i = 0; i < h; i++) {
const uint8_t *tab = s->lut[plane];
for (j = 0; j < w; j++)
outrow[j] = tab[inrow[j]];
inrow += in ->linesize[plane];
outrow += out->linesize[plane];
}
}
}
if (!direct)
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
Commit Message: avfilter: fix plane validity checks
Fixes out of array accesses
Signed-off-by: Michael Niedermayer <[email protected]>
CWE ID: CWE-119 | static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
LutContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
uint8_t *inrow, *outrow, *inrow0, *outrow0;
int i, j, plane, direct = 0;
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
if (s->is_rgb) {
/* packed */
inrow0 = in ->data[0];
outrow0 = out->data[0];
for (i = 0; i < in->height; i ++) {
int w = inlink->w;
const uint8_t (*tab)[256] = (const uint8_t (*)[256])s->lut;
inrow = inrow0;
outrow = outrow0;
for (j = 0; j < w; j++) {
switch (s->step) {
case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
default: outrow[0] = tab[0][inrow[0]];
}
outrow += s->step;
inrow += s->step;
}
inrow0 += in ->linesize[0];
outrow0 += out->linesize[0];
}
} else {
/* planar */
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
int h = FF_CEIL_RSHIFT(inlink->h, vsub);
int w = FF_CEIL_RSHIFT(inlink->w, hsub);
inrow = in ->data[plane];
outrow = out->data[plane];
for (i = 0; i < h; i++) {
const uint8_t *tab = s->lut[plane];
for (j = 0; j < w; j++)
outrow[j] = tab[inrow[j]];
inrow += in ->linesize[plane];
outrow += out->linesize[plane];
}
}
}
if (!direct)
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
| 166,004 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static MagickBooleanType SetGrayscaleImage(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
PixelPacket
*colormap;
register ssize_t
i;
ssize_t
*colormap_index,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace);
colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
ExceptionInfo
*exception;
(void) ResetMagickMemory(colormap_index,(-1),MaxColormapSize*
sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
image->colors=0;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(q));
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=GetPixelRed(q);
image->colormap[image->colors].green=GetPixelGreen(q);
image->colormap[image->colors].blue=GetPixelBlue(q);
image->colors++;
}
}
SetPixelIndex(indexes+x,colormap_index[intensity]);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].opacity=(unsigned short) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
IntensityCompare);
colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
sizeof(*colormap));
if (colormap == (PixelPacket *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].opacity]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
indexes+x))]);
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,&image->exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
Commit Message: https://github.com/ImageMagick/ImageMagick/issues/574
CWE ID: CWE-772 | static MagickBooleanType SetGrayscaleImage(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
PixelPacket
*colormap;
register ssize_t
i;
ssize_t
*colormap_index,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace);
colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
ExceptionInfo
*exception;
(void) ResetMagickMemory(colormap_index,(-1),MaxColormapSize*
sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(q));
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=GetPixelRed(q);
image->colormap[image->colors].green=GetPixelGreen(q);
image->colormap[image->colors].blue=GetPixelBlue(q);
image->colors++;
}
}
SetPixelIndex(indexes+x,colormap_index[intensity]);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].opacity=(unsigned short) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
IntensityCompare);
colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
sizeof(*colormap));
if (colormap == (PixelPacket *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].opacity]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
indexes+x))]);
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,&image->exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
| 167,975 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void DownloadUIAdapterDelegate::OpenItem(const OfflineItem& item,
int64_t offline_id) {
JNIEnv* env = AttachCurrentThread();
Java_OfflinePageDownloadBridge_openItem(
env, ConvertUTF8ToJavaString(env, item.page_url.spec()), offline_id);
}
Commit Message: Open Offline Pages in CCT from Downloads Home.
When the respective feature flag is enabled, offline pages opened from
the Downloads Home will use CCT instead of normal tabs.
Bug: 824807
Change-Id: I6d968b8b0c51aaeb7f26332c7ada9f927e151a65
Reviewed-on: https://chromium-review.googlesource.com/977321
Commit-Queue: Carlos Knippschild <[email protected]>
Reviewed-by: Ted Choc <[email protected]>
Reviewed-by: Bernhard Bauer <[email protected]>
Reviewed-by: Jian Li <[email protected]>
Cr-Commit-Position: refs/heads/master@{#546545}
CWE ID: CWE-264 | void DownloadUIAdapterDelegate::OpenItem(const OfflineItem& item,
int64_t offline_id) {
JNIEnv* env = AttachCurrentThread();
Java_OfflinePageDownloadBridge_openItem(
env, ConvertUTF8ToJavaString(env, item.page_url.spec()), offline_id,
offline_pages::ShouldOfflinePagesInDownloadHomeOpenInCct());
}
| 171,751 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: Accelerator GetAccelerator(KeyboardCode code, int mask) {
return Accelerator(code, mask & (1 << 0), mask & (1 << 1), mask & (1 << 2));
}
Commit Message: accelerators: Remove deprecated Accelerator ctor that takes booleans.
BUG=128242
[email protected]
Review URL: https://chromiumcodereview.appspot.com/10399085
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137957 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | Accelerator GetAccelerator(KeyboardCode code, int mask) {
return Accelerator(code, mask);
}
| 170,907 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: HistogramBase* Histogram::Factory::Build() {
HistogramBase* histogram = StatisticsRecorder::FindHistogram(name_);
if (!histogram) {
const BucketRanges* created_ranges = CreateRanges();
const BucketRanges* registered_ranges =
StatisticsRecorder::RegisterOrDeleteDuplicateRanges(created_ranges);
if (bucket_count_ == 0) {
bucket_count_ = static_cast<uint32_t>(registered_ranges->bucket_count());
minimum_ = registered_ranges->range(1);
maximum_ = registered_ranges->range(bucket_count_ - 1);
}
PersistentHistogramAllocator::Reference histogram_ref = 0;
std::unique_ptr<HistogramBase> tentative_histogram;
PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
if (allocator) {
tentative_histogram = allocator->AllocateHistogram(
histogram_type_,
name_,
minimum_,
maximum_,
registered_ranges,
flags_,
&histogram_ref);
}
if (!tentative_histogram) {
DCHECK(!histogram_ref); // Should never have been set.
DCHECK(!allocator); // Shouldn't have failed.
flags_ &= ~HistogramBase::kIsPersistent;
tentative_histogram = HeapAlloc(registered_ranges);
tentative_histogram->SetFlags(flags_);
}
FillHistogram(tentative_histogram.get());
const void* tentative_histogram_ptr = tentative_histogram.get();
histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
tentative_histogram.release());
if (histogram_ref) {
allocator->FinalizeHistogram(histogram_ref,
histogram == tentative_histogram_ptr);
}
ReportHistogramActivity(*histogram, HISTOGRAM_CREATED);
} else {
ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
}
DCHECK_EQ(histogram_type_, histogram->GetHistogramType()) << name_;
if (bucket_count_ != 0 &&
!histogram->HasConstructionArguments(minimum_, maximum_, bucket_count_)) {
DLOG(ERROR) << "Histogram " << name_ << " has bad construction arguments";
return nullptr;
}
return histogram;
}
Commit Message: Convert DCHECKs to CHECKs for histogram types
When a histogram is looked up by name, there is currently a DCHECK that
verifies the type of the stored histogram matches the expected type.
A mismatch represents a significant problem because the returned
HistogramBase is cast to a Histogram in ValidateRangeChecksum,
potentially causing a crash.
This CL converts the DCHECK to a CHECK to prevent the possibility of
type confusion in release builds.
BUG=651443
[email protected]
Review-Url: https://codereview.chromium.org/2381893003
Cr-Commit-Position: refs/heads/master@{#421929}
CWE ID: CWE-476 | HistogramBase* Histogram::Factory::Build() {
HistogramBase* histogram = StatisticsRecorder::FindHistogram(name_);
if (!histogram) {
const BucketRanges* created_ranges = CreateRanges();
const BucketRanges* registered_ranges =
StatisticsRecorder::RegisterOrDeleteDuplicateRanges(created_ranges);
if (bucket_count_ == 0) {
bucket_count_ = static_cast<uint32_t>(registered_ranges->bucket_count());
minimum_ = registered_ranges->range(1);
maximum_ = registered_ranges->range(bucket_count_ - 1);
}
PersistentHistogramAllocator::Reference histogram_ref = 0;
std::unique_ptr<HistogramBase> tentative_histogram;
PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
if (allocator) {
tentative_histogram = allocator->AllocateHistogram(
histogram_type_,
name_,
minimum_,
maximum_,
registered_ranges,
flags_,
&histogram_ref);
}
if (!tentative_histogram) {
DCHECK(!histogram_ref); // Should never have been set.
DCHECK(!allocator); // Shouldn't have failed.
flags_ &= ~HistogramBase::kIsPersistent;
tentative_histogram = HeapAlloc(registered_ranges);
tentative_histogram->SetFlags(flags_);
}
FillHistogram(tentative_histogram.get());
const void* tentative_histogram_ptr = tentative_histogram.get();
histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
tentative_histogram.release());
if (histogram_ref) {
allocator->FinalizeHistogram(histogram_ref,
histogram == tentative_histogram_ptr);
}
ReportHistogramActivity(*histogram, HISTOGRAM_CREATED);
} else {
ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
}
CHECK_EQ(histogram_type_, histogram->GetHistogramType()) << name_;
if (bucket_count_ != 0 &&
!histogram->HasConstructionArguments(minimum_, maximum_, bucket_count_)) {
DLOG(ERROR) << "Histogram " << name_ << " has bad construction arguments";
return nullptr;
}
return histogram;
}
| 172,493 |
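The change above turns a DCHECK into a CHECK because the type match is security-relevant: debug-only assertions vanish in release builds, so the invariant must also be enforced at runtime before the pointer is treated as the more specific type. A small, generic C sketch of that distinction (assert() versus an explicit check); the enum, struct, and lookup function are hypothetical, not the Chromium histogram types.

#include <assert.h>
#include <stdio.h>

/* assert() is compiled out under NDEBUG, so a security-relevant invariant
 * also needs an explicit runtime check that survives release builds. */
enum obj_type { TYPE_A, TYPE_B };

struct obj {
    enum obj_type type;
};

struct obj *lookup_checked(struct obj *candidate, enum obj_type expected)
{
    assert(candidate->type == expected);   /* debug builds only */

    if (candidate->type != expected) {     /* still enforced in release builds */
        fprintf(stderr, "type mismatch, refusing to use object\n");
        return NULL;                       /* caller must handle the failure */
    }
    return candidate;
}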
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
BIGNUM *b, *c = NULL, *u = NULL, *v = NULL, *tmp;
int ret = 0;
bn_check_top(a);
bn_check_top(p);
BN_CTX_start(ctx);
if ((b = BN_CTX_get(ctx)) == NULL)
goto err;
if ((c = BN_CTX_get(ctx)) == NULL)
goto err;
if ((u = BN_CTX_get(ctx)) == NULL)
goto err;
if ((v = BN_CTX_get(ctx)) == NULL)
goto err;
if (!BN_GF2m_mod(u, a, p))
goto err;
if (BN_is_zero(u))
goto err;
if (!BN_copy(v, p))
goto err;
# if 0
if (!BN_one(b))
goto err;
while (1) {
while (!BN_is_odd(u)) {
if (BN_is_zero(u))
goto err;
if (!BN_rshift1(u, u))
goto err;
if (BN_is_odd(b)) {
if (!BN_GF2m_add(b, b, p))
goto err;
}
if (!BN_rshift1(b, b))
goto err;
}
if (BN_abs_is_word(u, 1))
break;
if (BN_num_bits(u) < BN_num_bits(v)) {
tmp = u;
u = v;
v = tmp;
tmp = b;
b = c;
c = tmp;
}
if (!BN_GF2m_add(u, u, v))
goto err;
if (!BN_GF2m_add(b, b, c))
goto err;
}
# else
{
int i, ubits = BN_num_bits(u), vbits = BN_num_bits(v), /* v is copy
* of p */
top = p->top;
BN_ULONG *udp, *bdp, *vdp, *cdp;
bn_wexpand(u, top);
udp = u->d;
for (i = u->top; i < top; i++)
udp[i] = 0;
u->top = top;
bn_wexpand(b, top);
bdp = b->d;
bdp[0] = 1;
for (i = 1; i < top; i++)
bdp[i] = 0;
b->top = top;
bn_wexpand(c, top);
cdp = c->d;
for (i = 0; i < top; i++)
cdp[i] = 0;
c->top = top;
vdp = v->d; /* It pays off to "cache" *->d pointers,
* because it allows optimizer to be more
* aggressive. But we don't have to "cache"
* p->d, because *p is declared 'const'... */
while (1) {
while (ubits && !(udp[0] & 1)) {
BN_ULONG u0, u1, b0, b1, mask;
u0 = udp[0];
b0 = bdp[0];
mask = (BN_ULONG)0 - (b0 & 1);
b0 ^= p->d[0] & mask;
for (i = 0; i < top - 1; i++) {
u1 = udp[i + 1];
udp[i] = ((u0 >> 1) | (u1 << (BN_BITS2 - 1))) & BN_MASK2;
u0 = u1;
b1 = bdp[i + 1] ^ (p->d[i + 1] & mask);
bdp[i] = ((b0 >> 1) | (b1 << (BN_BITS2 - 1))) & BN_MASK2;
b0 = b1;
}
udp[i] = u0 >> 1;
bdp[i] = b0 >> 1;
ubits--;
}
if (ubits <= BN_BITS2 && udp[0] == 1)
break;
if (ubits < vbits) {
i = ubits;
ubits = vbits;
vbits = i;
tmp = u;
u = v;
v = tmp;
tmp = b;
b = c;
c = tmp;
udp = vdp;
vdp = v->d;
bdp = cdp;
cdp = c->d;
}
for (i = 0; i < top; i++) {
udp[i] ^= vdp[i];
bdp[i] ^= cdp[i];
}
if (ubits == vbits) {
BN_ULONG ul;
int utop = (ubits - 1) / BN_BITS2;
while ((ul = udp[utop]) == 0 && utop)
utop--;
ubits = utop * BN_BITS2 + BN_num_bits_word(ul);
}
}
bn_correct_top(b);
}
# endif
if (!BN_copy(r, b))
goto err;
bn_check_top(r);
ret = 1;
err:
# ifdef BN_DEBUG /* BN_CTX_end would complain about the
* expanded form */
bn_correct_top(c);
bn_correct_top(u);
bn_correct_top(v);
# endif
BN_CTX_end(ctx);
return ret;
}
Commit Message: bn/bn_gf2m.c: avoid infinite loop with malformed ECParameters.
CVE-2015-1788
Reviewed-by: Matt Caswell <[email protected]>
CWE ID: CWE-399 | int BN_GF2m_mod_inv(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
{
BIGNUM *b, *c = NULL, *u = NULL, *v = NULL, *tmp;
int ret = 0;
bn_check_top(a);
bn_check_top(p);
BN_CTX_start(ctx);
if ((b = BN_CTX_get(ctx)) == NULL)
goto err;
if ((c = BN_CTX_get(ctx)) == NULL)
goto err;
if ((u = BN_CTX_get(ctx)) == NULL)
goto err;
if ((v = BN_CTX_get(ctx)) == NULL)
goto err;
if (!BN_GF2m_mod(u, a, p))
goto err;
if (BN_is_zero(u))
goto err;
if (!BN_copy(v, p))
goto err;
# if 0
if (!BN_one(b))
goto err;
while (1) {
while (!BN_is_odd(u)) {
if (BN_is_zero(u))
goto err;
if (!BN_rshift1(u, u))
goto err;
if (BN_is_odd(b)) {
if (!BN_GF2m_add(b, b, p))
goto err;
}
if (!BN_rshift1(b, b))
goto err;
}
if (BN_abs_is_word(u, 1))
break;
if (BN_num_bits(u) < BN_num_bits(v)) {
tmp = u;
u = v;
v = tmp;
tmp = b;
b = c;
c = tmp;
}
if (!BN_GF2m_add(u, u, v))
goto err;
if (!BN_GF2m_add(b, b, c))
goto err;
}
# else
{
int i;
int ubits = BN_num_bits(u);
int vbits = BN_num_bits(v); /* v is copy of p */
int top = p->top;
BN_ULONG *udp, *bdp, *vdp, *cdp;
bn_wexpand(u, top);
udp = u->d;
for (i = u->top; i < top; i++)
udp[i] = 0;
u->top = top;
bn_wexpand(b, top);
bdp = b->d;
bdp[0] = 1;
for (i = 1; i < top; i++)
bdp[i] = 0;
b->top = top;
bn_wexpand(c, top);
cdp = c->d;
for (i = 0; i < top; i++)
cdp[i] = 0;
c->top = top;
vdp = v->d; /* It pays off to "cache" *->d pointers,
* because it allows optimizer to be more
* aggressive. But we don't have to "cache"
* p->d, because *p is declared 'const'... */
while (1) {
while (ubits && !(udp[0] & 1)) {
BN_ULONG u0, u1, b0, b1, mask;
u0 = udp[0];
b0 = bdp[0];
mask = (BN_ULONG)0 - (b0 & 1);
b0 ^= p->d[0] & mask;
for (i = 0; i < top - 1; i++) {
u1 = udp[i + 1];
udp[i] = ((u0 >> 1) | (u1 << (BN_BITS2 - 1))) & BN_MASK2;
u0 = u1;
b1 = bdp[i + 1] ^ (p->d[i + 1] & mask);
bdp[i] = ((b0 >> 1) | (b1 << (BN_BITS2 - 1))) & BN_MASK2;
b0 = b1;
}
udp[i] = u0 >> 1;
bdp[i] = b0 >> 1;
ubits--;
}
if (ubits <= BN_BITS2) {
if (udp[0] == 0) /* poly was reducible */
goto err;
if (udp[0] == 1)
break;
}
if (ubits < vbits) {
i = ubits;
ubits = vbits;
vbits = i;
tmp = u;
u = v;
v = tmp;
tmp = b;
b = c;
c = tmp;
udp = vdp;
vdp = v->d;
bdp = cdp;
cdp = c->d;
}
for (i = 0; i < top; i++) {
udp[i] ^= vdp[i];
bdp[i] ^= cdp[i];
}
if (ubits == vbits) {
BN_ULONG ul;
int utop = (ubits - 1) / BN_BITS2;
while ((ul = udp[utop]) == 0 && utop)
utop--;
ubits = utop * BN_BITS2 + BN_num_bits_word(ul);
}
}
bn_correct_top(b);
}
# endif
if (!BN_copy(r, b))
goto err;
bn_check_top(r);
ret = 1;
err:
# ifdef BN_DEBUG /* BN_CTX_end would complain about the
* expanded form */
bn_correct_top(c);
bn_correct_top(u);
bn_correct_top(v);
# endif
BN_CTX_end(ctx);
return ret;
}
| 166,694 |
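The commit message above describes an infinite loop on malformed ECParameters: the inversion loop assumed a well-formed field polynomial and could spin forever otherwise, which the patch prevents by treating udp[0] == 0 as an error once ubits is small. A generic sketch of that defensive pattern, rejecting input that cannot converge and adding a hard iteration backstop; the function and its reduce_step callback are illustrative, not OpenSSL's bignum code.

#include <stddef.h>
#include <stdint.h>

/* A loop whose termination depends on properties of untrusted input gets
 * an explicit "cannot converge" check and a hard iteration backstop, so
 * malformed parameters produce an error instead of hanging the caller. */
int reduce_until_unit(uint64_t *work, size_t words,
                      int (*reduce_step)(uint64_t *, size_t))
{
    size_t max_steps = words * 64;      /* generous bound on useful iterations */

    while (work[0] != 1) {
        if (work[0] == 0)               /* degenerate input: would loop forever */
            return -1;
        if (max_steps-- == 0)           /* backstop against non-termination */
            return -1;
        if (reduce_step(work, words) != 0)
            return -1;
    }
    return 0;
}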
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp)
{
unsigned int nsegs;
__be32 *p;
p = rdma_resp;
/* RPC-over-RDMA V1 replies never have a Read list. */
p += rpcrdma_fixed_maxsz + 1;
/* Skip Write list. */
while (*p++ != xdr_zero) {
nsegs = be32_to_cpup(p++);
p += nsegs * rpcrdma_segment_maxsz;
}
/* Skip Reply chunk. */
if (*p++ != xdr_zero) {
nsegs = be32_to_cpup(p++);
p += nsegs * rpcrdma_segment_maxsz;
}
return (unsigned long)p - (unsigned long)rdma_resp;
}
Commit Message: Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Another RDMA update from Chuck Lever, and a bunch of miscellaneous
bugfixes"
* tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits)
nfsd: Fix up the "supattr_exclcreat" attributes
nfsd: encoders mustn't use uninitialized values in error cases
nfsd: fix undefined behavior in nfsd4_layout_verify
lockd: fix lockd shutdown race
NFSv4: Fix callback server shutdown
SUNRPC: Refactor svc_set_num_threads()
NFSv4.x/callback: Create the callback service through svc_create_pooled
lockd: remove redundant check on block
svcrdma: Clean out old XDR encoders
svcrdma: Remove the req_map cache
svcrdma: Remove unused RDMA Write completion handler
svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt
svcrdma: Clean up RPC-over-RDMA backchannel reply processing
svcrdma: Report Write/Reply chunk overruns
svcrdma: Clean up RDMA_ERROR path
svcrdma: Use rdma_rw API in RPC reply path
svcrdma: Introduce local rdma_rw API helpers
svcrdma: Clean up svc_rdma_get_inv_rkey()
svcrdma: Add helper to save pages under I/O
svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT
...
CWE ID: CWE-404 | unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp)
| 168,163 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int usb_get_bos_descriptor(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
struct usb_bos_descriptor *bos;
struct usb_dev_cap_header *cap;
unsigned char *buffer;
int length, total_len, num, i;
int ret;
bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
if (!bos)
return -ENOMEM;
/* Get BOS descriptor */
ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE);
if (ret < USB_DT_BOS_SIZE) {
dev_err(ddev, "unable to get BOS descriptor\n");
if (ret >= 0)
ret = -ENOMSG;
kfree(bos);
return ret;
}
length = bos->bLength;
total_len = le16_to_cpu(bos->wTotalLength);
num = bos->bNumDeviceCaps;
kfree(bos);
if (total_len < length)
return -EINVAL;
dev->bos = kzalloc(sizeof(struct usb_host_bos), GFP_KERNEL);
if (!dev->bos)
return -ENOMEM;
/* Now let's get the whole BOS descriptor set */
buffer = kzalloc(total_len, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto err;
}
dev->bos->desc = (struct usb_bos_descriptor *)buffer;
ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len);
if (ret < total_len) {
dev_err(ddev, "unable to get BOS descriptor set\n");
if (ret >= 0)
ret = -ENOMSG;
goto err;
}
total_len -= length;
for (i = 0; i < num; i++) {
buffer += length;
cap = (struct usb_dev_cap_header *)buffer;
length = cap->bLength;
if (total_len < length)
break;
total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
dev_warn(ddev, "descriptor type invalid, skip\n");
continue;
}
switch (cap->bDevCapabilityType) {
case USB_CAP_TYPE_WIRELESS_USB:
/* Wireless USB cap descriptor is handled by wusb */
break;
case USB_CAP_TYPE_EXT:
dev->bos->ext_cap =
(struct usb_ext_cap_descriptor *)buffer;
break;
case USB_SS_CAP_TYPE:
dev->bos->ss_cap =
(struct usb_ss_cap_descriptor *)buffer;
break;
case USB_SSP_CAP_TYPE:
dev->bos->ssp_cap =
(struct usb_ssp_cap_descriptor *)buffer;
break;
case CONTAINER_ID_TYPE:
dev->bos->ss_id =
(struct usb_ss_container_id_descriptor *)buffer;
break;
case USB_PTM_CAP_TYPE:
dev->bos->ptm_cap =
(struct usb_ptm_cap_descriptor *)buffer;
default:
break;
}
}
return 0;
err:
usb_release_bos_descriptor(dev);
return ret;
}
Commit Message: USB: core: fix out-of-bounds access bug in usb_get_bos_descriptor()
Andrey used the syzkaller fuzzer to find an out-of-bounds memory
access in usb_get_bos_descriptor(). The code wasn't checking that the
next usb_dev_cap_header structure could fit into the remaining buffer
space.
This patch fixes the error and also reduces the bNumDeviceCaps field
in the header to match the actual number of capabilities found, in
cases where there are fewer than expected.
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Alan Stern <[email protected]>
Tested-by: Andrey Konovalov <[email protected]>
CC: <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
CWE ID: CWE-125 | int usb_get_bos_descriptor(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
struct usb_bos_descriptor *bos;
struct usb_dev_cap_header *cap;
unsigned char *buffer;
int length, total_len, num, i;
int ret;
bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
if (!bos)
return -ENOMEM;
/* Get BOS descriptor */
ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE);
if (ret < USB_DT_BOS_SIZE) {
dev_err(ddev, "unable to get BOS descriptor\n");
if (ret >= 0)
ret = -ENOMSG;
kfree(bos);
return ret;
}
length = bos->bLength;
total_len = le16_to_cpu(bos->wTotalLength);
num = bos->bNumDeviceCaps;
kfree(bos);
if (total_len < length)
return -EINVAL;
dev->bos = kzalloc(sizeof(struct usb_host_bos), GFP_KERNEL);
if (!dev->bos)
return -ENOMEM;
/* Now let's get the whole BOS descriptor set */
buffer = kzalloc(total_len, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto err;
}
dev->bos->desc = (struct usb_bos_descriptor *)buffer;
ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len);
if (ret < total_len) {
dev_err(ddev, "unable to get BOS descriptor set\n");
if (ret >= 0)
ret = -ENOMSG;
goto err;
}
total_len -= length;
for (i = 0; i < num; i++) {
buffer += length;
cap = (struct usb_dev_cap_header *)buffer;
if (total_len < sizeof(*cap) || total_len < cap->bLength) {
dev->bos->desc->bNumDeviceCaps = i;
break;
}
length = cap->bLength;
total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
dev_warn(ddev, "descriptor type invalid, skip\n");
continue;
}
switch (cap->bDevCapabilityType) {
case USB_CAP_TYPE_WIRELESS_USB:
/* Wireless USB cap descriptor is handled by wusb */
break;
case USB_CAP_TYPE_EXT:
dev->bos->ext_cap =
(struct usb_ext_cap_descriptor *)buffer;
break;
case USB_SS_CAP_TYPE:
dev->bos->ss_cap =
(struct usb_ss_cap_descriptor *)buffer;
break;
case USB_SSP_CAP_TYPE:
dev->bos->ssp_cap =
(struct usb_ssp_cap_descriptor *)buffer;
break;
case CONTAINER_ID_TYPE:
dev->bos->ss_id =
(struct usb_ss_container_id_descriptor *)buffer;
break;
case USB_PTM_CAP_TYPE:
dev->bos->ptm_cap =
(struct usb_ptm_cap_descriptor *)buffer;
default:
break;
}
}
return 0;
err:
usb_release_bos_descriptor(dev);
return ret;
}
| 167,675 |
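The patch above adds the check the commit message calls out: before each usb_dev_cap_header is read, the remaining space must hold the header and the length it declares, and bNumDeviceCaps is reduced when fewer capabilities actually fit. A generic walker for length-prefixed records showing the same two checks; the record layout and names are illustrative, not the USB BOS format.

#include <stddef.h>
#include <stdint.h>

/* Walker for length-prefixed records: the header must fit in the remaining
 * space, and the length it declares must fit as well, before anything is
 * dereferenced or the offset is advanced. */
struct rec_header {
    uint8_t length;                     /* total record length, header included */
    uint8_t type;
};

int walk_records(const uint8_t *buf, size_t total_len,
                 void (*handle)(const struct rec_header *))
{
    size_t off = 0;

    while (off < total_len) {
        const struct rec_header *hdr;
        size_t remaining = total_len - off;

        if (remaining < sizeof(*hdr))   /* header itself must fit */
            return -1;
        hdr = (const struct rec_header *)(buf + off);
        if (hdr->length < sizeof(*hdr) || hdr->length > remaining)
            return -1;                  /* declared length must also fit */

        handle(hdr);
        off += hdr->length;             /* length >= sizeof(*hdr), so we progress */
    }
    return 0;
}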
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void PdfCompositorClient::Composite(
service_manager::Connector* connector,
base::SharedMemoryHandle handle,
size_t data_size,
mojom::PdfCompositor::CompositePdfCallback callback,
scoped_refptr<base::SequencedTaskRunner> callback_task_runner) {
DCHECK(data_size);
if (!compositor_)
Connect(connector);
mojo::ScopedSharedBufferHandle buffer_handle =
mojo::WrapSharedMemoryHandle(handle, data_size, true);
compositor_->CompositePdf(
std::move(buffer_handle),
base::BindOnce(&OnCompositePdf, base::Passed(&compositor_),
std::move(callback), callback_task_runner));
}
Commit Message: Correct mojo::WrapSharedMemoryHandle usage
Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which
were assuming that the call actually has any control over the memory
protection applied to a handle when mapped.
Where fixing usage is infeasible for this CL, TODOs are added to
annotate follow-up work.
Also updates the API and documentation to (hopefully) improve clarity
and avoid similar mistakes from being made in the future.
BUG=792900
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477
Reviewed-on: https://chromium-review.googlesource.com/818282
Reviewed-by: Wei Li <[email protected]>
Reviewed-by: Lei Zhang <[email protected]>
Reviewed-by: John Abd-El-Malek <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Sadrul Chowdhury <[email protected]>
Reviewed-by: Yuzhu Shen <[email protected]>
Reviewed-by: Robert Sesek <[email protected]>
Commit-Queue: Ken Rockot <[email protected]>
Cr-Commit-Position: refs/heads/master@{#530268}
CWE ID: CWE-787 | void PdfCompositorClient::Composite(
service_manager::Connector* connector,
base::SharedMemoryHandle handle,
size_t data_size,
mojom::PdfCompositor::CompositePdfCallback callback,
scoped_refptr<base::SequencedTaskRunner> callback_task_runner) {
DCHECK(data_size);
if (!compositor_)
Connect(connector);
mojo::ScopedSharedBufferHandle buffer_handle = mojo::WrapSharedMemoryHandle(
handle, data_size,
mojo::UnwrappedSharedMemoryHandleProtection::kReadOnly);
compositor_->CompositePdf(
std::move(buffer_handle),
base::BindOnce(&OnCompositePdf, base::Passed(&compositor_),
std::move(callback), callback_task_runner));
}
| 172,858 |
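The commit above corrects code that assumed wrapping a shared-memory handle could by itself control how the memory is accessed; the protection that matters is the one requested when the region is mapped. A POSIX-level illustration of that point (not Chromium's Mojo API): a receiver that should only read opens and maps the region read-only itself. The shm name and size handling are assumptions for the sketch.

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

/* The effective protection of a shared region is the one requested at
 * mmap() time, so a read-only consumer maps it PROT_READ rather than
 * trusting whatever the sender claimed. shm_name is assumed to identify
 * a region created elsewhere with shm_open()/ftruncate(). */
int map_region_read_only(const char *shm_name, size_t size, const void **out)
{
    int fd = shm_open(shm_name, O_RDONLY, 0);
    if (fd < 0)
        return -1;

    void *p = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
    close(fd);                          /* the mapping stays valid after close */
    if (p == MAP_FAILED)
        return -1;

    *out = p;                           /* caller munmap()s when finished */
    return 0;
}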
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool ExtensionTtsPlatformImplChromeOs::IsSpeaking() {
if (chromeos::CrosLibrary::Get()->EnsureLoaded()) {
return chromeos::CrosLibrary::Get()->GetSpeechSynthesisLibrary()->
IsSpeaking();
}
set_error(kCrosLibraryNotLoadedError);
return false;
}
Commit Message: Extend TTS extension API to support richer events returned from the engine
to the client. Previously we just had a completed event; this adds start,
word boundary, sentence boundary, and marker boundary. In addition,
interrupted and canceled, which were previously errors, now become events.
Mac and Windows implementations extended to support as many of these events
as possible.
BUG=67713
BUG=70198
BUG=75106
BUG=83404
TEST=Updates all TTS API tests to be event-based, and adds new tests.
Review URL: http://codereview.chromium.org/6792014
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@91665 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-20 | bool ExtensionTtsPlatformImplChromeOs::IsSpeaking() {
bool ExtensionTtsPlatformImplChromeOs::SendsEvent(TtsEventType event_type) {
return (event_type == TTS_EVENT_START ||
event_type == TTS_EVENT_END ||
event_type == TTS_EVENT_ERROR);
}
void ExtensionTtsPlatformImplChromeOs::PollUntilSpeechFinishes(
int utterance_id) {
if (utterance_id != utterance_id_) {
// This utterance must have been interrupted or cancelled.
return;
}
chromeos::CrosLibrary* cros_library = chromeos::CrosLibrary::Get();
ExtensionTtsController* controller = ExtensionTtsController::GetInstance();
if (!cros_library->EnsureLoaded()) {
controller->OnTtsEvent(
utterance_id_, TTS_EVENT_ERROR, 0, kCrosLibraryNotLoadedError);
return;
}
if (!cros_library->GetSpeechSynthesisLibrary()->IsSpeaking()) {
controller->OnTtsEvent(
utterance_id_, TTS_EVENT_END, utterance_length_, std::string());
return;
}
MessageLoop::current()->PostDelayedTask(
FROM_HERE, method_factory_.NewRunnableMethod(
&ExtensionTtsPlatformImplChromeOs::PollUntilSpeechFinishes,
utterance_id),
kSpeechCheckDelayIntervalMs);
}
| 170,398 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void PluginChannel::OnChannelError() {
base::CloseProcessHandle(renderer_handle_);
renderer_handle_ = 0;
NPChannelBase::OnChannelError();
CleanUp();
}
Commit Message: Convert plugin and GPU process to brokered handle duplication.
BUG=119250
Review URL: https://chromiumcodereview.appspot.com/9958034
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | void PluginChannel::OnChannelError() {
NPChannelBase::OnChannelError();
CleanUp();
}
| 170,949 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: PlatformNotificationData ToPlatformNotificationData(
const WebNotificationData& web_data) {
PlatformNotificationData platform_data;
platform_data.title = web_data.title;
switch (web_data.direction) {
case WebNotificationData::DirectionLeftToRight:
platform_data.direction =
PlatformNotificationData::DIRECTION_LEFT_TO_RIGHT;
break;
case WebNotificationData::DirectionRightToLeft:
platform_data.direction =
PlatformNotificationData::DIRECTION_RIGHT_TO_LEFT;
break;
case WebNotificationData::DirectionAuto:
platform_data.direction = PlatformNotificationData::DIRECTION_AUTO;
break;
}
platform_data.lang = base::UTF16ToUTF8(base::StringPiece16(web_data.lang));
platform_data.body = web_data.body;
platform_data.tag = base::UTF16ToUTF8(base::StringPiece16(web_data.tag));
platform_data.icon = blink::WebStringToGURL(web_data.icon.string());
platform_data.vibration_pattern.assign(web_data.vibrate.begin(),
web_data.vibrate.end());
platform_data.timestamp = base::Time::FromJsTime(web_data.timestamp);
platform_data.silent = web_data.silent;
platform_data.require_interaction = web_data.requireInteraction;
platform_data.data.assign(web_data.data.begin(), web_data.data.end());
platform_data.actions.resize(web_data.actions.size());
for (size_t i = 0; i < web_data.actions.size(); ++i) {
platform_data.actions[i].action =
base::UTF16ToUTF8(base::StringPiece16(web_data.actions[i].action));
platform_data.actions[i].title = web_data.actions[i].title;
}
return platform_data;
}
Commit Message: Notification actions may have an icon url.
This is behind a runtime flag for two reasons:
* The implementation is incomplete.
* We're still evaluating the API design.
Intent to Implement and Ship: Notification Action Icons
https://groups.google.com/a/chromium.org/d/msg/blink-dev/IM0HxOP7HOA/y8tu6iq1CgAJ
BUG=581336
Review URL: https://codereview.chromium.org/1644573002
Cr-Commit-Position: refs/heads/master@{#374649}
CWE ID: | PlatformNotificationData ToPlatformNotificationData(
const WebNotificationData& web_data) {
PlatformNotificationData platform_data;
platform_data.title = web_data.title;
switch (web_data.direction) {
case WebNotificationData::DirectionLeftToRight:
platform_data.direction =
PlatformNotificationData::DIRECTION_LEFT_TO_RIGHT;
break;
case WebNotificationData::DirectionRightToLeft:
platform_data.direction =
PlatformNotificationData::DIRECTION_RIGHT_TO_LEFT;
break;
case WebNotificationData::DirectionAuto:
platform_data.direction = PlatformNotificationData::DIRECTION_AUTO;
break;
}
platform_data.lang = base::UTF16ToUTF8(base::StringPiece16(web_data.lang));
platform_data.body = web_data.body;
platform_data.tag = base::UTF16ToUTF8(base::StringPiece16(web_data.tag));
platform_data.icon = blink::WebStringToGURL(web_data.icon.string());
platform_data.vibration_pattern.assign(web_data.vibrate.begin(),
web_data.vibrate.end());
platform_data.timestamp = base::Time::FromJsTime(web_data.timestamp);
platform_data.silent = web_data.silent;
platform_data.require_interaction = web_data.requireInteraction;
platform_data.data.assign(web_data.data.begin(), web_data.data.end());
platform_data.actions.resize(web_data.actions.size());
for (size_t i = 0; i < web_data.actions.size(); ++i) {
platform_data.actions[i].action =
base::UTF16ToUTF8(base::StringPiece16(web_data.actions[i].action));
platform_data.actions[i].title = web_data.actions[i].title;
platform_data.actions[i].icon =
blink::WebStringToGURL(web_data.actions[i].icon.string());
}
return platform_data;
}
| 171,631 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void CrosLibrary::TestApi::SetInputMethodLibrary(
InputMethodLibrary* library, bool own) {
library_->input_method_lib_.SetImpl(library, own);
}
Commit Message: chromeos: Replace copy-and-pasted code with macros.
This replaces a bunch of duplicated-per-library cros
function definitions and comments.
BUG=none
TEST=built it
Review URL: http://codereview.chromium.org/6086007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@70070 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-189 | void CrosLibrary::TestApi::SetInputMethodLibrary(
| 170,638 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: ServiceWorkerContainer* NavigatorServiceWorker::serviceWorker(Navigator& navigator, ExceptionState& exceptionState)
{
return NavigatorServiceWorker::from(navigator).serviceWorker(exceptionState);
}
Commit Message: Add ASSERT() to avoid accidental leaking ServiceWorkerContainer to cross origin context.
BUG=522791
Review URL: https://codereview.chromium.org/1305903007
git-svn-id: svn://svn.chromium.org/blink/trunk@201889 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-264 | ServiceWorkerContainer* NavigatorServiceWorker::serviceWorker(Navigator& navigator, ExceptionState& exceptionState)
ServiceWorkerContainer* NavigatorServiceWorker::serviceWorker(ExecutionContext* executionContext, Navigator& navigator, ExceptionState& exceptionState)
{
ASSERT(!navigator.frame() || executionContext->securityOrigin()->canAccessCheckSuborigins(navigator.frame()->securityContext()->securityOrigin()));
return NavigatorServiceWorker::from(navigator).serviceWorker(exceptionState);
}
| 171,862 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: on_register_handler(TCMUService1HandlerManager1 *interface,
GDBusMethodInvocation *invocation,
gchar *subtype,
gchar *cfg_desc,
gpointer user_data)
{
struct tcmur_handler *handler;
struct dbus_info *info;
char *bus_name;
bus_name = g_strdup_printf("org.kernel.TCMUService1.HandlerManager1.%s",
subtype);
handler = g_new0(struct tcmur_handler, 1);
handler->subtype = g_strdup(subtype);
handler->cfg_desc = g_strdup(cfg_desc);
handler->open = dbus_handler_open;
handler->close = dbus_handler_close;
handler->handle_cmd = dbus_handler_handle_cmd;
info = g_new0(struct dbus_info, 1);
info->register_invocation = invocation;
info->watcher_id = g_bus_watch_name(G_BUS_TYPE_SYSTEM,
bus_name,
G_BUS_NAME_WATCHER_FLAGS_NONE,
on_handler_appeared,
on_handler_vanished,
handler,
NULL);
g_free(bus_name);
handler->opaque = info;
return TRUE;
}
Commit Message: only allow dynamic UnregisterHandler for external handlers, thereby fixing DoS
Trying to unregister an internal handler ended up in a SEGFAULT, because
the tcmur_handler->opaque was NULL. Way to reproduce:
dbus-send --system --print-reply --dest=org.kernel.TCMUService1 /org/kernel/TCMUService1/HandlerManager1 org.kernel.TCMUService1.HandlerManager1.UnregisterHandler string:qcow
we use a newly introduced boolean in struct tcmur_handler for keeping
track of external handlers. As suggested by mikechristie adjusting the
public data structure is acceptable.
CWE ID: CWE-476 | on_register_handler(TCMUService1HandlerManager1 *interface,
GDBusMethodInvocation *invocation,
gchar *subtype,
gchar *cfg_desc,
gpointer user_data)
{
struct tcmur_handler *handler;
struct dbus_info *info;
char *bus_name;
bus_name = g_strdup_printf("org.kernel.TCMUService1.HandlerManager1.%s",
subtype);
handler = g_new0(struct tcmur_handler, 1);
handler->subtype = g_strdup(subtype);
handler->cfg_desc = g_strdup(cfg_desc);
handler->open = dbus_handler_open;
handler->close = dbus_handler_close;
handler->handle_cmd = dbus_handler_handle_cmd;
info = g_new0(struct dbus_info, 1);
handler->opaque = info;
handler->_is_dbus_handler = 1;
info->register_invocation = invocation;
info->watcher_id = g_bus_watch_name(G_BUS_TYPE_SYSTEM,
bus_name,
G_BUS_NAME_WATCHER_FLAGS_NONE,
on_handler_appeared,
on_handler_vanished,
handler,
NULL);
g_free(bus_name);
handler->opaque = info;
return TRUE;
}
| 167,633 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int snd_timer_start_slave(struct snd_timer_instance *timeri)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master)
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
Commit Message: ALSA: timer: Harden slave timer list handling
A slave timer instance might be still accessible in a racy way while
operating the master instance as it lacks of locking. Since the
master operation is mostly protected with timer->lock, we should cope
with it while changing the slave instance, too. Also, some linked
lists (active_list and ack_list) of slave instances aren't unlinked
immediately at stopping or closing, and this may lead to unexpected
accesses.
This patch tries to address these issues. It adds spin lock of
timer->lock (either from master or slave, which is equivalent) in a
few places. For avoiding a deadlock, we ensure that the global
slave_active_lock is always locked at first before each timer lock.
Also, ack and active_list of slave instances are properly unlinked at
snd_timer_stop() and snd_timer_close().
Last but not least, remove the superfluous call of _snd_timer_stop()
at removing slave links. This is a noop, and calling it may confuse
readers wrt locking. Further cleanup will follow in a later patch.
Actually we've got reports of use-after-free by syzkaller fuzzer, and
this hopefully fixes these issues.
Reported-by: Dmitry Vyukov <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
CWE ID: CWE-20 | static int snd_timer_start_slave(struct snd_timer_instance *timeri)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
spin_unlock(&timeri->timer->lock);
}
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
| 167,403 |
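The commit message above closes the race by imposing a consistent lock order (the global slave_active_lock always taken before a timer's lock) and by unlinking list entries while those locks are held. A user-space sketch of that lock-ordering rule, with pthread mutexes standing in for the kernel spinlocks; timer_obj is illustrative and its lock is assumed to be initialized before use.

#include <pthread.h>

/* Every path that needs both locks takes them in the same order:
 * the global list lock first, then the per-object lock, so no pair of
 * threads can wait on each other's second lock. */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct timer_obj {
    pthread_mutex_t lock;
    int running;
};

void start_slave(struct timer_obj *t)
{
    pthread_mutex_lock(&list_lock);     /* global lock always first */
    pthread_mutex_lock(&t->lock);       /* per-object lock second */
    t->running = 1;
    /* ... link t into the shared active list here ... */
    pthread_mutex_unlock(&t->lock);
    pthread_mutex_unlock(&list_lock);
}

void stop_slave(struct timer_obj *t)
{
    pthread_mutex_lock(&list_lock);     /* same order on the stop path */
    pthread_mutex_lock(&t->lock);
    t->running = 0;
    /* ... unlink t from the shared active list here ... */
    pthread_mutex_unlock(&t->lock);
    pthread_mutex_unlock(&list_lock);
}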
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void PepperRendererConnection::OnMsgDidCreateInProcessInstance(
PP_Instance instance,
const PepperRendererInstanceData& instance_data) {
PepperRendererInstanceData data = instance_data;
data.render_process_id = render_process_id_;
in_process_host_->AddInstance(instance, data);
}
Commit Message: Validate in-process plugin instance messages.
Bug: 733548, 733549
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_site_isolation
Change-Id: Ie5572c7bcafa05399b09c44425ddd5ce9b9e4cba
Reviewed-on: https://chromium-review.googlesource.com/538908
Commit-Queue: Bill Budge <[email protected]>
Reviewed-by: Raymes Khoury <[email protected]>
Cr-Commit-Position: refs/heads/master@{#480696}
CWE ID: CWE-20 | void PepperRendererConnection::OnMsgDidCreateInProcessInstance(
PP_Instance instance,
const PepperRendererInstanceData& instance_data) {
PepperRendererInstanceData data = instance_data;
// It's important that we supply the render process ID ourselves since the
// message may be coming from a compromised renderer.
data.render_process_id = render_process_id_;
// 'instance' is possibly invalid. The host must be careful not to trust it.
in_process_host_->AddInstance(instance, data);
}
| 172,311 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void MediaStreamDispatcherHost::DoOpenDevice(
int32_t page_request_id,
const std::string& device_id,
blink::MediaStreamType type,
OpenDeviceCallback callback,
MediaDeviceSaltAndOrigin salt_and_origin) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
if (!MediaStreamManager::IsOriginAllowed(render_process_id_,
salt_and_origin.origin)) {
std::move(callback).Run(false /* success */, std::string(),
blink::MediaStreamDevice());
return;
}
media_stream_manager_->OpenDevice(
render_process_id_, render_frame_id_, page_request_id, requester_id_,
device_id, type, std::move(salt_and_origin), std::move(callback),
base::BindRepeating(&MediaStreamDispatcherHost::OnDeviceStopped,
weak_factory_.GetWeakPtr()));
}
Commit Message: [MediaStream] Pass request ID parameters in the right order for OpenDevice()
Prior to this CL, requester_id and page_request_id parameters were
passed in incorrect order from MediaStreamDispatcherHost to
MediaStreamManager for the OpenDevice() operation, which could lead to
errors.
Bug: 948564
Change-Id: Iadcf3fe26adaac50564102138ce212269cf32d62
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1569113
Reviewed-by: Marina Ciocea <[email protected]>
Commit-Queue: Guido Urdaneta <[email protected]>
Cr-Commit-Position: refs/heads/master@{#651255}
CWE ID: CWE-119 | void MediaStreamDispatcherHost::DoOpenDevice(
int32_t page_request_id,
const std::string& device_id,
blink::MediaStreamType type,
OpenDeviceCallback callback,
MediaDeviceSaltAndOrigin salt_and_origin) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
if (!MediaStreamManager::IsOriginAllowed(render_process_id_,
salt_and_origin.origin)) {
std::move(callback).Run(false /* success */, std::string(),
blink::MediaStreamDevice());
return;
}
media_stream_manager_->OpenDevice(
render_process_id_, render_frame_id_, requester_id_, page_request_id,
device_id, type, std::move(salt_and_origin), std::move(callback),
base::BindRepeating(&MediaStreamDispatcherHost::OnDeviceStopped,
weak_factory_.GetWeakPtr()));
}
| 173,015 |
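The bug in the row above is an argument-order swap between two parameters of the same integral type, which the compiler cannot notice. A small, hypothetical sketch of one way to turn such swaps into compile errors (distinct wrapper types; not how these ids are actually declared in Chromium):

#include <cstdint>

struct RequesterId { std::int32_t value; };
struct PageRequestId { std::int32_t value; };

void OpenDevice(RequesterId requester, PageRequestId page_request) {
  // Use requester.value / page_request.value here.
  (void)requester;
  (void)page_request;
}

int main() {
  RequesterId requester{5};
  PageRequestId page_request{17};
  OpenDevice(requester, page_request);    // ok
  // OpenDevice(page_request, requester); // would not compile
}

Keeping call sites honest this way is cheaper than re-auditing every caller whenever a parameter list is reordered.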
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: bool WebGLImageConversion::ExtractTextureData(unsigned width,
unsigned height,
GLenum format,
GLenum type,
unsigned unpack_alignment,
bool flip_y,
bool premultiply_alpha,
const void* pixels,
Vector<uint8_t>& data) {
DataFormat source_data_format = GetDataFormat(format, type);
if (source_data_format == kDataFormatNumFormats)
return false;
unsigned int components_per_pixel, bytes_per_component;
if (!ComputeFormatAndTypeParameters(format, type, &components_per_pixel,
&bytes_per_component))
return false;
unsigned bytes_per_pixel = components_per_pixel * bytes_per_component;
data.resize(width * height * bytes_per_pixel);
if (!PackPixels(static_cast<const uint8_t*>(pixels), source_data_format,
width, height, IntRect(0, 0, width, height), 1,
unpack_alignment, 0, format, type,
(premultiply_alpha ? kAlphaDoPremultiply : kAlphaDoNothing),
data.data(), flip_y))
return false;
return true;
}
Commit Message: Implement 2D texture uploading from client array with FLIP_Y or PREMULTIPLY_ALPHA.
BUG=774174
TEST=https://github.com/KhronosGroup/WebGL/pull/2555
[email protected]
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I4f4e7636314502451104730501a5048a5d7b9f3f
Reviewed-on: https://chromium-review.googlesource.com/808665
Commit-Queue: Zhenyao Mo <[email protected]>
Reviewed-by: Kenneth Russell <[email protected]>
Cr-Commit-Position: refs/heads/master@{#522003}
CWE ID: CWE-125 | bool WebGLImageConversion::ExtractTextureData(
unsigned width,
unsigned height,
GLenum format,
GLenum type,
const PixelStoreParams& unpack_params,
bool flip_y,
bool premultiply_alpha,
const void* pixels,
Vector<uint8_t>& data) {
DataFormat source_data_format = GetDataFormat(format, type);
if (source_data_format == kDataFormatNumFormats)
return false;
unsigned int components_per_pixel, bytes_per_component;
if (!ComputeFormatAndTypeParameters(format, type, &components_per_pixel,
&bytes_per_component))
return false;
unsigned bytes_per_pixel = components_per_pixel * bytes_per_component;
data.resize(width * height * bytes_per_pixel);
unsigned image_size_in_bytes, skip_size_in_bytes;
ComputeImageSizeInBytes(format, type, width, height, 1, unpack_params,
&image_size_in_bytes, nullptr, &skip_size_in_bytes);
const uint8_t* src_data = static_cast<const uint8_t*>(pixels);
if (skip_size_in_bytes) {
src_data += skip_size_in_bytes;
}
if (!PackPixels(src_data, source_data_format,
unpack_params.row_length ? unpack_params.row_length : width,
height, IntRect(0, 0, width, height), 1,
unpack_params.alignment, 0, format, type,
(premultiply_alpha ? kAlphaDoPremultiply : kAlphaDoNothing),
data.data(), flip_y))
return false;
return true;
}
| 172,682 |
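The substance of the fix in the row above is honoring the unpack parameters when reading the client array: with UNPACK_ROW_LENGTH and UNPACK_SKIP_* in play, the reader must derive a row stride and a starting byte offset rather than assume a tightly packed buffer beginning at byte 0. A simplified, hypothetical sketch of that arithmetic (not the actual Blink helpers):

#include <cstddef>
#include <iostream>

struct PixelStoreParams {
  std::size_t alignment = 4;    // UNPACK_ALIGNMENT
  std::size_t row_length = 0;   // UNPACK_ROW_LENGTH, 0 means "use width"
  std::size_t skip_pixels = 0;  // UNPACK_SKIP_PIXELS
  std::size_t skip_rows = 0;    // UNPACK_SKIP_ROWS
};

std::size_t RowStrideBytes(std::size_t width, std::size_t bytes_per_pixel,
                           const PixelStoreParams& p) {
  std::size_t pixels_per_row = p.row_length ? p.row_length : width;
  std::size_t unpadded = pixels_per_row * bytes_per_pixel;
  return (unpadded + p.alignment - 1) / p.alignment * p.alignment;
}

std::size_t SkipBytes(std::size_t width, std::size_t bytes_per_pixel,
                      const PixelStoreParams& p) {
  return p.skip_rows * RowStrideBytes(width, bytes_per_pixel, p) +
         p.skip_pixels * bytes_per_pixel;
}

int main() {
  PixelStoreParams p;
  p.row_length = 16;
  p.skip_rows = 2;
  p.skip_pixels = 3;
  // RGBA8, 16 pixels per row -> 64-byte stride; skip 2 rows plus 3 pixels.
  std::cout << SkipBytes(8, 4, p) << "\n";  // prints 140
}

In the patched function the skip offset and effective row length come from the same PixelStoreParams structure used for size computation, so the reader and the validator agree on which bytes are touched.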
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ExtensionBrowserTest::OpenWindow(content::WebContents* contents,
const GURL& url,
bool newtab_process_should_equal_opener,
content::WebContents** newtab_result) {
content::WebContentsAddedObserver tab_added_observer;
ASSERT_TRUE(content::ExecuteScript(contents,
"window.open('" + url.spec() + "');"));
content::WebContents* newtab = tab_added_observer.GetWebContents();
ASSERT_TRUE(newtab);
WaitForLoadStop(newtab);
EXPECT_EQ(url, newtab->GetLastCommittedURL());
if (newtab_process_should_equal_opener) {
EXPECT_EQ(contents->GetMainFrame()->GetSiteInstance(),
newtab->GetMainFrame()->GetSiteInstance());
} else {
EXPECT_NE(contents->GetMainFrame()->GetSiteInstance(),
newtab->GetMainFrame()->GetSiteInstance());
}
if (newtab_result)
*newtab_result = newtab;
}
Commit Message: [Extensions] Update navigations across hypothetical extension extents
Update code to treat navigations across hypothetical extension extents
(e.g. for nonexistent extensions) the same as we do for navigations
crossing installed extension extents.
Bug: 598265
Change-Id: Ibdf2f563ce1fd108ead279077901020a24de732b
Reviewed-on: https://chromium-review.googlesource.com/617180
Commit-Queue: Devlin <[email protected]>
Reviewed-by: Alex Moshchuk <[email protected]>
Reviewed-by: Nasko Oskov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#495779}
CWE ID: | void ExtensionBrowserTest::OpenWindow(content::WebContents* contents,
const GURL& url,
bool newtab_process_should_equal_opener,
bool should_succeed,
content::WebContents** newtab_result) {
content::WebContentsAddedObserver tab_added_observer;
ASSERT_TRUE(content::ExecuteScript(contents,
"window.open('" + url.spec() + "');"));
content::WebContents* newtab = tab_added_observer.GetWebContents();
ASSERT_TRUE(newtab);
WaitForLoadStop(newtab);
if (should_succeed) {
EXPECT_EQ(url, newtab->GetLastCommittedURL());
EXPECT_EQ(content::PAGE_TYPE_NORMAL,
newtab->GetController().GetLastCommittedEntry()->GetPageType());
} else {
// "Failure" comes in two forms: redirecting to about:blank or showing an
// error page. At least one should be true.
EXPECT_TRUE(
newtab->GetLastCommittedURL() == GURL(url::kAboutBlankURL) ||
newtab->GetController().GetLastCommittedEntry()->GetPageType() ==
content::PAGE_TYPE_ERROR);
}
if (newtab_process_should_equal_opener) {
EXPECT_EQ(contents->GetMainFrame()->GetSiteInstance(),
newtab->GetMainFrame()->GetSiteInstance());
} else {
EXPECT_NE(contents->GetMainFrame()->GetSiteInstance(),
newtab->GetMainFrame()->GetSiteInstance());
}
if (newtab_result)
*newtab_result = newtab;
}
| 172,958 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
unsigned int cryptlen)
{
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
AHASH_REQUEST_ON_STACK(ahreq, ctx->mac);
unsigned int assoclen = req->assoclen;
struct scatterlist sg[3];
u8 odata[16];
u8 idata[16];
int ilen, err;
/* format control data for input */
err = format_input(odata, req, cryptlen);
if (err)
goto out;
sg_init_table(sg, 3);
sg_set_buf(&sg[0], odata, 16);
/* format associated data and compute into mac */
if (assoclen) {
ilen = format_adata(idata, assoclen);
sg_set_buf(&sg[1], idata, ilen);
sg_chain(sg, 3, req->src);
} else {
ilen = 0;
sg_chain(sg, 2, req->src);
}
ahash_request_set_tfm(ahreq, ctx->mac);
ahash_request_set_callback(ahreq, pctx->flags, NULL, NULL);
ahash_request_set_crypt(ahreq, sg, NULL, assoclen + ilen + 16);
err = crypto_ahash_init(ahreq);
if (err)
goto out;
err = crypto_ahash_update(ahreq);
if (err)
goto out;
/* we need to pad the MAC input to a round multiple of the block size */
ilen = 16 - (assoclen + ilen) % 16;
if (ilen < 16) {
memset(idata, 0, ilen);
sg_init_table(sg, 2);
sg_set_buf(&sg[0], idata, ilen);
if (plain)
sg_chain(sg, 2, plain);
plain = sg;
cryptlen += ilen;
}
ahash_request_set_crypt(ahreq, plain, pctx->odata, cryptlen);
err = crypto_ahash_finup(ahreq);
out:
return err;
}
Commit Message: crypto: ccm - move cbcmac input off the stack
Commit f15f05b0a5de ("crypto: ccm - switch to separate cbcmac driver")
refactored the CCM driver to allow separate implementations of the
underlying MAC to be provided by a platform. However, in doing so, it
moved some data from the linear region to the stack, which violates the
SG constraints when the stack is virtually mapped.
So move idata/odata back to the request ctx struct, of which we can
reasonably expect that it has been allocated using kmalloc() et al.
Reported-by: Johannes Berg <[email protected]>
Fixes: f15f05b0a5de ("crypto: ccm - switch to separate cbcmac driver")
Signed-off-by: Ard Biesheuvel <[email protected]>
Tested-by: Johannes Berg <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
CWE ID: CWE-119 | static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
unsigned int cryptlen)
{
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
AHASH_REQUEST_ON_STACK(ahreq, ctx->mac);
unsigned int assoclen = req->assoclen;
struct scatterlist sg[3];
u8 *odata = pctx->odata;
u8 *idata = pctx->idata;
int ilen, err;
/* format control data for input */
err = format_input(odata, req, cryptlen);
if (err)
goto out;
sg_init_table(sg, 3);
sg_set_buf(&sg[0], odata, 16);
/* format associated data and compute into mac */
if (assoclen) {
ilen = format_adata(idata, assoclen);
sg_set_buf(&sg[1], idata, ilen);
sg_chain(sg, 3, req->src);
} else {
ilen = 0;
sg_chain(sg, 2, req->src);
}
ahash_request_set_tfm(ahreq, ctx->mac);
ahash_request_set_callback(ahreq, pctx->flags, NULL, NULL);
ahash_request_set_crypt(ahreq, sg, NULL, assoclen + ilen + 16);
err = crypto_ahash_init(ahreq);
if (err)
goto out;
err = crypto_ahash_update(ahreq);
if (err)
goto out;
/* we need to pad the MAC input to a round multiple of the block size */
ilen = 16 - (assoclen + ilen) % 16;
if (ilen < 16) {
memset(idata, 0, ilen);
sg_init_table(sg, 2);
sg_set_buf(&sg[0], idata, ilen);
if (plain)
sg_chain(sg, 2, plain);
plain = sg;
cryptlen += ilen;
}
ahash_request_set_crypt(ahreq, plain, pctx->odata, cryptlen);
err = crypto_ahash_finup(ahreq);
out:
return err;
}
| 168,221 |
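The commit in the row above is about where the MAC scratch buffers live: memory handed to scatter/gather machinery must be linearly mapped, which a virtually mapped kernel stack is not, so odata/idata move from locals into the heap-allocated per-request context. A loose user-space analogy, assuming a hypothetical RegisterBuffer API that records pointers for processing after the function returns:

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Segment { const std::uint8_t* ptr; std::size_t len; };
std::vector<Segment> g_segments;  // stands in for a scatter/gather list

void RegisterBuffer(const std::uint8_t* p, std::size_t len) {
  g_segments.push_back({p, len});  // keeps the pointer, does not copy the data
}

struct RequestCtx {
  // Scratch blocks live with the request, not on any caller's stack.
  std::array<std::uint8_t, 16> odata{};
  std::array<std::uint8_t, 16> idata{};
};

void ComputeMac(RequestCtx& ctx) {
  RegisterBuffer(ctx.odata.data(), ctx.odata.size());  // fine: ctx outlives us
  RegisterBuffer(ctx.idata.data(), ctx.idata.size());
  // The removed pattern, by contrast, registered stack arrays:
  //   std::uint8_t local[16];
  //   RegisterBuffer(local, sizeof(local));  // dangles once we return
}

int main() {
  RequestCtx ctx;
  ComputeMac(ctx);
  std::printf("%zu segments registered\n", g_segments.size());
}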
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void btif_dm_upstreams_evt(UINT16 event, char* p_param)
{
tBTA_DM_SEC *p_data = (tBTA_DM_SEC*)p_param;
tBTA_SERVICE_MASK service_mask;
uint32_t i;
bt_bdaddr_t bd_addr;
BTIF_TRACE_EVENT("btif_dm_upstreams_cback ev: %s", dump_dm_event(event));
switch (event)
{
case BTA_DM_ENABLE_EVT:
{
BD_NAME bdname;
bt_status_t status;
bt_property_t prop;
prop.type = BT_PROPERTY_BDNAME;
prop.len = BD_NAME_LEN;
prop.val = (void*)bdname;
status = btif_storage_get_adapter_property(&prop);
if (status == BT_STATUS_SUCCESS)
{
/* A name exists in the storage. Make this the device name */
BTA_DmSetDeviceName((char*)prop.val);
}
else
{
/* Storage does not have a name yet.
* Use the default name and write it to the chip
*/
BTA_DmSetDeviceName(btif_get_default_local_name());
}
#if (defined(BLE_INCLUDED) && (BLE_INCLUDED == TRUE))
/* Enable local privacy */
BTA_DmBleConfigLocalPrivacy(BLE_LOCAL_PRIVACY_ENABLED);
#endif
/* for each of the enabled services in the mask, trigger the profile
* enable */
service_mask = btif_get_enabled_services_mask();
for (i=0; i <= BTA_MAX_SERVICE_ID; i++)
{
if (service_mask &
(tBTA_SERVICE_MASK)(BTA_SERVICE_ID_TO_SERVICE_MASK(i)))
{
btif_in_execute_service_request(i, TRUE);
}
}
/* clear control blocks */
memset(&pairing_cb, 0, sizeof(btif_dm_pairing_cb_t));
pairing_cb.bond_type = BOND_TYPE_PERSISTENT;
/* This function will also trigger the adapter_properties_cb
** and bonded_devices_info_cb
*/
btif_storage_load_bonded_devices();
btif_storage_load_autopair_device_list();
btif_enable_bluetooth_evt(p_data->enable.status);
}
break;
case BTA_DM_DISABLE_EVT:
/* for each of the enabled services in the mask, trigger the profile
* disable */
service_mask = btif_get_enabled_services_mask();
for (i=0; i <= BTA_MAX_SERVICE_ID; i++)
{
if (service_mask &
(tBTA_SERVICE_MASK)(BTA_SERVICE_ID_TO_SERVICE_MASK(i)))
{
btif_in_execute_service_request(i, FALSE);
}
}
btif_disable_bluetooth_evt();
break;
case BTA_DM_PIN_REQ_EVT:
btif_dm_pin_req_evt(&p_data->pin_req);
break;
case BTA_DM_AUTH_CMPL_EVT:
btif_dm_auth_cmpl_evt(&p_data->auth_cmpl);
break;
case BTA_DM_BOND_CANCEL_CMPL_EVT:
if (pairing_cb.state == BT_BOND_STATE_BONDING)
{
bdcpy(bd_addr.address, pairing_cb.bd_addr);
btm_set_bond_type_dev(pairing_cb.bd_addr, BOND_TYPE_UNKNOWN);
bond_state_changed(p_data->bond_cancel_cmpl.result, &bd_addr, BT_BOND_STATE_NONE);
}
break;
case BTA_DM_SP_CFM_REQ_EVT:
btif_dm_ssp_cfm_req_evt(&p_data->cfm_req);
break;
case BTA_DM_SP_KEY_NOTIF_EVT:
btif_dm_ssp_key_notif_evt(&p_data->key_notif);
break;
case BTA_DM_DEV_UNPAIRED_EVT:
bdcpy(bd_addr.address, p_data->link_down.bd_addr);
btm_set_bond_type_dev(p_data->link_down.bd_addr, BOND_TYPE_UNKNOWN);
/*special handling for HID devices */
#if (defined(BTA_HH_INCLUDED) && (BTA_HH_INCLUDED == TRUE))
btif_hh_remove_device(bd_addr);
#endif
btif_storage_remove_bonded_device(&bd_addr);
bond_state_changed(BT_STATUS_SUCCESS, &bd_addr, BT_BOND_STATE_NONE);
break;
case BTA_DM_BUSY_LEVEL_EVT:
{
if (p_data->busy_level.level_flags & BTM_BL_INQUIRY_PAGING_MASK)
{
if (p_data->busy_level.level_flags == BTM_BL_INQUIRY_STARTED)
{
HAL_CBACK(bt_hal_cbacks, discovery_state_changed_cb,
BT_DISCOVERY_STARTED);
btif_dm_inquiry_in_progress = TRUE;
}
else if (p_data->busy_level.level_flags == BTM_BL_INQUIRY_CANCELLED)
{
HAL_CBACK(bt_hal_cbacks, discovery_state_changed_cb,
BT_DISCOVERY_STOPPED);
btif_dm_inquiry_in_progress = FALSE;
}
else if (p_data->busy_level.level_flags == BTM_BL_INQUIRY_COMPLETE)
{
btif_dm_inquiry_in_progress = FALSE;
}
}
}break;
case BTA_DM_LINK_UP_EVT:
bdcpy(bd_addr.address, p_data->link_up.bd_addr);
BTIF_TRACE_DEBUG("BTA_DM_LINK_UP_EVT. Sending BT_ACL_STATE_CONNECTED");
btif_update_remote_version_property(&bd_addr);
HAL_CBACK(bt_hal_cbacks, acl_state_changed_cb, BT_STATUS_SUCCESS,
&bd_addr, BT_ACL_STATE_CONNECTED);
break;
case BTA_DM_LINK_DOWN_EVT:
bdcpy(bd_addr.address, p_data->link_down.bd_addr);
btm_set_bond_type_dev(p_data->link_down.bd_addr, BOND_TYPE_UNKNOWN);
BTIF_TRACE_DEBUG("BTA_DM_LINK_DOWN_EVT. Sending BT_ACL_STATE_DISCONNECTED");
HAL_CBACK(bt_hal_cbacks, acl_state_changed_cb, BT_STATUS_SUCCESS,
&bd_addr, BT_ACL_STATE_DISCONNECTED);
break;
case BTA_DM_HW_ERROR_EVT:
BTIF_TRACE_ERROR("Received H/W Error. ");
/* Flush storage data */
btif_config_flush();
usleep(100000); /* 100milliseconds */
/* Killing the process to force a restart as part of fault tolerance */
kill(getpid(), SIGKILL);
break;
#if (defined(BLE_INCLUDED) && (BLE_INCLUDED == TRUE))
case BTA_DM_BLE_KEY_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_KEY_EVT key_type=0x%02x ", p_data->ble_key.key_type);
/* If this pairing is by-product of local initiated GATT client Read or Write,
BTA would not have sent BTA_DM_BLE_SEC_REQ_EVT event and Bond state would not
have setup properly. Setup pairing_cb and notify App about Bonding state now*/
if (pairing_cb.state != BT_BOND_STATE_BONDING)
{
BTIF_TRACE_DEBUG("Bond state not sent to App so far.Notify the app now");
bond_state_changed(BT_STATUS_SUCCESS, (bt_bdaddr_t*)p_data->ble_key.bd_addr,
BT_BOND_STATE_BONDING);
}
else if (memcmp (pairing_cb.bd_addr, p_data->ble_key.bd_addr, BD_ADDR_LEN)!=0)
{
BTIF_TRACE_ERROR("BD mismatch discard BLE key_type=%d ",p_data->ble_key.key_type);
break;
}
switch (p_data->ble_key.key_type)
{
case BTA_LE_KEY_PENC:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_PENC");
pairing_cb.ble.is_penc_key_rcvd = TRUE;
pairing_cb.ble.penc_key = p_data->ble_key.p_key_value->penc_key;
break;
case BTA_LE_KEY_PID:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_PID");
pairing_cb.ble.is_pid_key_rcvd = TRUE;
pairing_cb.ble.pid_key = p_data->ble_key.p_key_value->pid_key;
break;
case BTA_LE_KEY_PCSRK:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_PCSRK");
pairing_cb.ble.is_pcsrk_key_rcvd = TRUE;
pairing_cb.ble.pcsrk_key = p_data->ble_key.p_key_value->pcsrk_key;
break;
case BTA_LE_KEY_LENC:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_LENC");
pairing_cb.ble.is_lenc_key_rcvd = TRUE;
pairing_cb.ble.lenc_key = p_data->ble_key.p_key_value->lenc_key;
break;
case BTA_LE_KEY_LCSRK:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_LCSRK");
pairing_cb.ble.is_lcsrk_key_rcvd = TRUE;
pairing_cb.ble.lcsrk_key = p_data->ble_key.p_key_value->lcsrk_key;
break;
case BTA_LE_KEY_LID:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_LID");
pairing_cb.ble.is_lidk_key_rcvd = TRUE;
break;
default:
BTIF_TRACE_ERROR("unknown BLE key type (0x%02x)", p_data->ble_key.key_type);
break;
}
break;
case BTA_DM_BLE_SEC_REQ_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_SEC_REQ_EVT. ");
btif_dm_ble_sec_req_evt(&p_data->ble_req);
break;
case BTA_DM_BLE_PASSKEY_NOTIF_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_PASSKEY_NOTIF_EVT. ");
btif_dm_ble_key_notif_evt(&p_data->key_notif);
break;
case BTA_DM_BLE_PASSKEY_REQ_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_PASSKEY_REQ_EVT. ");
btif_dm_ble_passkey_req_evt(&p_data->pin_req);
break;
case BTA_DM_BLE_NC_REQ_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_PASSKEY_REQ_EVT. ");
btif_dm_ble_key_nc_req_evt(&p_data->key_notif);
break;
case BTA_DM_BLE_OOB_REQ_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_OOB_REQ_EVT. ");
break;
case BTA_DM_BLE_LOCAL_IR_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_LOCAL_IR_EVT. ");
ble_local_key_cb.is_id_keys_rcvd = TRUE;
memcpy(&ble_local_key_cb.id_keys.irk[0],
&p_data->ble_id_keys.irk[0], sizeof(BT_OCTET16));
memcpy(&ble_local_key_cb.id_keys.ir[0],
&p_data->ble_id_keys.ir[0], sizeof(BT_OCTET16));
memcpy(&ble_local_key_cb.id_keys.dhk[0],
&p_data->ble_id_keys.dhk[0], sizeof(BT_OCTET16));
btif_storage_add_ble_local_key( (char *)&ble_local_key_cb.id_keys.irk[0],
BTIF_DM_LE_LOCAL_KEY_IRK,
BT_OCTET16_LEN);
btif_storage_add_ble_local_key( (char *)&ble_local_key_cb.id_keys.ir[0],
BTIF_DM_LE_LOCAL_KEY_IR,
BT_OCTET16_LEN);
btif_storage_add_ble_local_key( (char *)&ble_local_key_cb.id_keys.dhk[0],
BTIF_DM_LE_LOCAL_KEY_DHK,
BT_OCTET16_LEN);
break;
case BTA_DM_BLE_LOCAL_ER_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_LOCAL_ER_EVT. ");
ble_local_key_cb.is_er_rcvd = TRUE;
memcpy(&ble_local_key_cb.er[0], &p_data->ble_er[0], sizeof(BT_OCTET16));
btif_storage_add_ble_local_key( (char *)&ble_local_key_cb.er[0],
BTIF_DM_LE_LOCAL_KEY_ER,
BT_OCTET16_LEN);
break;
case BTA_DM_BLE_AUTH_CMPL_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_AUTH_CMPL_EVT. ");
btif_dm_ble_auth_cmpl_evt(&p_data->auth_cmpl);
break;
case BTA_DM_LE_FEATURES_READ:
{
tBTM_BLE_VSC_CB cmn_vsc_cb;
bt_local_le_features_t local_le_features;
char buf[512];
bt_property_t prop;
prop.type = BT_PROPERTY_LOCAL_LE_FEATURES;
prop.val = (void*)buf;
prop.len = sizeof(buf);
/* LE features are not stored in storage. Should be retrived from stack */
BTM_BleGetVendorCapabilities(&cmn_vsc_cb);
local_le_features.local_privacy_enabled = BTM_BleLocalPrivacyEnabled();
prop.len = sizeof (bt_local_le_features_t);
if (cmn_vsc_cb.filter_support == 1)
local_le_features.max_adv_filter_supported = cmn_vsc_cb.max_filter;
else
local_le_features.max_adv_filter_supported = 0;
local_le_features.max_adv_instance = cmn_vsc_cb.adv_inst_max;
local_le_features.max_irk_list_size = cmn_vsc_cb.max_irk_list_sz;
local_le_features.rpa_offload_supported = cmn_vsc_cb.rpa_offloading;
local_le_features.activity_energy_info_supported = cmn_vsc_cb.energy_support;
local_le_features.scan_result_storage_size = cmn_vsc_cb.tot_scan_results_strg;
local_le_features.version_supported = cmn_vsc_cb.version_supported;
local_le_features.total_trackable_advertisers =
cmn_vsc_cb.total_trackable_advertisers;
local_le_features.extended_scan_support = cmn_vsc_cb.extended_scan_support > 0;
local_le_features.debug_logging_supported = cmn_vsc_cb.debug_logging_supported > 0;
memcpy(prop.val, &local_le_features, prop.len);
HAL_CBACK(bt_hal_cbacks, adapter_properties_cb, BT_STATUS_SUCCESS, 1, &prop);
break;
}
case BTA_DM_ENER_INFO_READ:
{
btif_activity_energy_info_cb_t *p_ener_data = (btif_activity_energy_info_cb_t*) p_param;
bt_activity_energy_info energy_info;
energy_info.status = p_ener_data->status;
energy_info.ctrl_state = p_ener_data->ctrl_state;
energy_info.rx_time = p_ener_data->rx_time;
energy_info.tx_time = p_ener_data->tx_time;
energy_info.idle_time = p_ener_data->idle_time;
energy_info.energy_used = p_ener_data->energy_used;
HAL_CBACK(bt_hal_cbacks, energy_info_cb, &energy_info);
break;
}
#endif
case BTA_DM_AUTHORIZE_EVT:
case BTA_DM_SIG_STRENGTH_EVT:
case BTA_DM_SP_RMT_OOB_EVT:
case BTA_DM_SP_KEYPRESS_EVT:
case BTA_DM_ROLE_CHG_EVT:
default:
BTIF_TRACE_WARNING( "btif_dm_cback : unhandled event (%d)", event );
break;
}
btif_dm_data_free(event, p_data);
}
Commit Message: DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
CWE ID: CWE-284 | static void btif_dm_upstreams_evt(UINT16 event, char* p_param)
{
tBTA_DM_SEC *p_data = (tBTA_DM_SEC*)p_param;
tBTA_SERVICE_MASK service_mask;
uint32_t i;
bt_bdaddr_t bd_addr;
BTIF_TRACE_EVENT("btif_dm_upstreams_cback ev: %s", dump_dm_event(event));
switch (event)
{
case BTA_DM_ENABLE_EVT:
{
BD_NAME bdname;
bt_status_t status;
bt_property_t prop;
prop.type = BT_PROPERTY_BDNAME;
prop.len = BD_NAME_LEN;
prop.val = (void*)bdname;
status = btif_storage_get_adapter_property(&prop);
if (status == BT_STATUS_SUCCESS)
{
/* A name exists in the storage. Make this the device name */
BTA_DmSetDeviceName((char*)prop.val);
}
else
{
/* Storage does not have a name yet.
* Use the default name and write it to the chip
*/
BTA_DmSetDeviceName(btif_get_default_local_name());
}
#if (defined(BLE_INCLUDED) && (BLE_INCLUDED == TRUE))
/* Enable local privacy */
BTA_DmBleConfigLocalPrivacy(BLE_LOCAL_PRIVACY_ENABLED);
#endif
/* for each of the enabled services in the mask, trigger the profile
* enable */
service_mask = btif_get_enabled_services_mask();
for (i=0; i <= BTA_MAX_SERVICE_ID; i++)
{
if (service_mask &
(tBTA_SERVICE_MASK)(BTA_SERVICE_ID_TO_SERVICE_MASK(i)))
{
btif_in_execute_service_request(i, TRUE);
}
}
/* clear control blocks */
memset(&pairing_cb, 0, sizeof(btif_dm_pairing_cb_t));
pairing_cb.bond_type = BOND_TYPE_PERSISTENT;
/* This function will also trigger the adapter_properties_cb
** and bonded_devices_info_cb
*/
btif_storage_load_bonded_devices();
btif_storage_load_autopair_device_list();
btif_enable_bluetooth_evt(p_data->enable.status);
}
break;
case BTA_DM_DISABLE_EVT:
/* for each of the enabled services in the mask, trigger the profile
* disable */
service_mask = btif_get_enabled_services_mask();
for (i=0; i <= BTA_MAX_SERVICE_ID; i++)
{
if (service_mask &
(tBTA_SERVICE_MASK)(BTA_SERVICE_ID_TO_SERVICE_MASK(i)))
{
btif_in_execute_service_request(i, FALSE);
}
}
btif_disable_bluetooth_evt();
break;
case BTA_DM_PIN_REQ_EVT:
btif_dm_pin_req_evt(&p_data->pin_req);
break;
case BTA_DM_AUTH_CMPL_EVT:
btif_dm_auth_cmpl_evt(&p_data->auth_cmpl);
break;
case BTA_DM_BOND_CANCEL_CMPL_EVT:
if (pairing_cb.state == BT_BOND_STATE_BONDING)
{
bdcpy(bd_addr.address, pairing_cb.bd_addr);
btm_set_bond_type_dev(pairing_cb.bd_addr, BOND_TYPE_UNKNOWN);
bond_state_changed(p_data->bond_cancel_cmpl.result, &bd_addr, BT_BOND_STATE_NONE);
}
break;
case BTA_DM_SP_CFM_REQ_EVT:
btif_dm_ssp_cfm_req_evt(&p_data->cfm_req);
break;
case BTA_DM_SP_KEY_NOTIF_EVT:
btif_dm_ssp_key_notif_evt(&p_data->key_notif);
break;
case BTA_DM_DEV_UNPAIRED_EVT:
bdcpy(bd_addr.address, p_data->link_down.bd_addr);
btm_set_bond_type_dev(p_data->link_down.bd_addr, BOND_TYPE_UNKNOWN);
/*special handling for HID devices */
#if (defined(BTA_HH_INCLUDED) && (BTA_HH_INCLUDED == TRUE))
btif_hh_remove_device(bd_addr);
#endif
btif_storage_remove_bonded_device(&bd_addr);
bond_state_changed(BT_STATUS_SUCCESS, &bd_addr, BT_BOND_STATE_NONE);
break;
case BTA_DM_BUSY_LEVEL_EVT:
{
if (p_data->busy_level.level_flags & BTM_BL_INQUIRY_PAGING_MASK)
{
if (p_data->busy_level.level_flags == BTM_BL_INQUIRY_STARTED)
{
HAL_CBACK(bt_hal_cbacks, discovery_state_changed_cb,
BT_DISCOVERY_STARTED);
btif_dm_inquiry_in_progress = TRUE;
}
else if (p_data->busy_level.level_flags == BTM_BL_INQUIRY_CANCELLED)
{
HAL_CBACK(bt_hal_cbacks, discovery_state_changed_cb,
BT_DISCOVERY_STOPPED);
btif_dm_inquiry_in_progress = FALSE;
}
else if (p_data->busy_level.level_flags == BTM_BL_INQUIRY_COMPLETE)
{
btif_dm_inquiry_in_progress = FALSE;
}
}
}break;
case BTA_DM_LINK_UP_EVT:
bdcpy(bd_addr.address, p_data->link_up.bd_addr);
BTIF_TRACE_DEBUG("BTA_DM_LINK_UP_EVT. Sending BT_ACL_STATE_CONNECTED");
btif_update_remote_version_property(&bd_addr);
HAL_CBACK(bt_hal_cbacks, acl_state_changed_cb, BT_STATUS_SUCCESS,
&bd_addr, BT_ACL_STATE_CONNECTED);
break;
case BTA_DM_LINK_DOWN_EVT:
bdcpy(bd_addr.address, p_data->link_down.bd_addr);
btm_set_bond_type_dev(p_data->link_down.bd_addr, BOND_TYPE_UNKNOWN);
BTIF_TRACE_DEBUG("BTA_DM_LINK_DOWN_EVT. Sending BT_ACL_STATE_DISCONNECTED");
HAL_CBACK(bt_hal_cbacks, acl_state_changed_cb, BT_STATUS_SUCCESS,
&bd_addr, BT_ACL_STATE_DISCONNECTED);
break;
case BTA_DM_HW_ERROR_EVT:
BTIF_TRACE_ERROR("Received H/W Error. ");
/* Flush storage data */
btif_config_flush();
TEMP_FAILURE_RETRY(usleep(100000)); /* 100milliseconds */
/* Killing the process to force a restart as part of fault tolerance */
kill(getpid(), SIGKILL);
break;
#if (defined(BLE_INCLUDED) && (BLE_INCLUDED == TRUE))
case BTA_DM_BLE_KEY_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_KEY_EVT key_type=0x%02x ", p_data->ble_key.key_type);
/* If this pairing is by-product of local initiated GATT client Read or Write,
BTA would not have sent BTA_DM_BLE_SEC_REQ_EVT event and Bond state would not
have setup properly. Setup pairing_cb and notify App about Bonding state now*/
if (pairing_cb.state != BT_BOND_STATE_BONDING)
{
BTIF_TRACE_DEBUG("Bond state not sent to App so far.Notify the app now");
bond_state_changed(BT_STATUS_SUCCESS, (bt_bdaddr_t*)p_data->ble_key.bd_addr,
BT_BOND_STATE_BONDING);
}
else if (memcmp (pairing_cb.bd_addr, p_data->ble_key.bd_addr, BD_ADDR_LEN)!=0)
{
BTIF_TRACE_ERROR("BD mismatch discard BLE key_type=%d ",p_data->ble_key.key_type);
break;
}
switch (p_data->ble_key.key_type)
{
case BTA_LE_KEY_PENC:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_PENC");
pairing_cb.ble.is_penc_key_rcvd = TRUE;
pairing_cb.ble.penc_key = p_data->ble_key.p_key_value->penc_key;
break;
case BTA_LE_KEY_PID:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_PID");
pairing_cb.ble.is_pid_key_rcvd = TRUE;
pairing_cb.ble.pid_key = p_data->ble_key.p_key_value->pid_key;
break;
case BTA_LE_KEY_PCSRK:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_PCSRK");
pairing_cb.ble.is_pcsrk_key_rcvd = TRUE;
pairing_cb.ble.pcsrk_key = p_data->ble_key.p_key_value->pcsrk_key;
break;
case BTA_LE_KEY_LENC:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_LENC");
pairing_cb.ble.is_lenc_key_rcvd = TRUE;
pairing_cb.ble.lenc_key = p_data->ble_key.p_key_value->lenc_key;
break;
case BTA_LE_KEY_LCSRK:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_LCSRK");
pairing_cb.ble.is_lcsrk_key_rcvd = TRUE;
pairing_cb.ble.lcsrk_key = p_data->ble_key.p_key_value->lcsrk_key;
break;
case BTA_LE_KEY_LID:
BTIF_TRACE_DEBUG("Rcv BTA_LE_KEY_LID");
pairing_cb.ble.is_lidk_key_rcvd = TRUE;
break;
default:
BTIF_TRACE_ERROR("unknown BLE key type (0x%02x)", p_data->ble_key.key_type);
break;
}
break;
case BTA_DM_BLE_SEC_REQ_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_SEC_REQ_EVT. ");
btif_dm_ble_sec_req_evt(&p_data->ble_req);
break;
case BTA_DM_BLE_PASSKEY_NOTIF_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_PASSKEY_NOTIF_EVT. ");
btif_dm_ble_key_notif_evt(&p_data->key_notif);
break;
case BTA_DM_BLE_PASSKEY_REQ_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_PASSKEY_REQ_EVT. ");
btif_dm_ble_passkey_req_evt(&p_data->pin_req);
break;
case BTA_DM_BLE_NC_REQ_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_PASSKEY_REQ_EVT. ");
btif_dm_ble_key_nc_req_evt(&p_data->key_notif);
break;
case BTA_DM_BLE_OOB_REQ_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_OOB_REQ_EVT. ");
break;
case BTA_DM_BLE_LOCAL_IR_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_LOCAL_IR_EVT. ");
ble_local_key_cb.is_id_keys_rcvd = TRUE;
memcpy(&ble_local_key_cb.id_keys.irk[0],
&p_data->ble_id_keys.irk[0], sizeof(BT_OCTET16));
memcpy(&ble_local_key_cb.id_keys.ir[0],
&p_data->ble_id_keys.ir[0], sizeof(BT_OCTET16));
memcpy(&ble_local_key_cb.id_keys.dhk[0],
&p_data->ble_id_keys.dhk[0], sizeof(BT_OCTET16));
btif_storage_add_ble_local_key( (char *)&ble_local_key_cb.id_keys.irk[0],
BTIF_DM_LE_LOCAL_KEY_IRK,
BT_OCTET16_LEN);
btif_storage_add_ble_local_key( (char *)&ble_local_key_cb.id_keys.ir[0],
BTIF_DM_LE_LOCAL_KEY_IR,
BT_OCTET16_LEN);
btif_storage_add_ble_local_key( (char *)&ble_local_key_cb.id_keys.dhk[0],
BTIF_DM_LE_LOCAL_KEY_DHK,
BT_OCTET16_LEN);
break;
case BTA_DM_BLE_LOCAL_ER_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_LOCAL_ER_EVT. ");
ble_local_key_cb.is_er_rcvd = TRUE;
memcpy(&ble_local_key_cb.er[0], &p_data->ble_er[0], sizeof(BT_OCTET16));
btif_storage_add_ble_local_key( (char *)&ble_local_key_cb.er[0],
BTIF_DM_LE_LOCAL_KEY_ER,
BT_OCTET16_LEN);
break;
case BTA_DM_BLE_AUTH_CMPL_EVT:
BTIF_TRACE_DEBUG("BTA_DM_BLE_AUTH_CMPL_EVT. ");
btif_dm_ble_auth_cmpl_evt(&p_data->auth_cmpl);
break;
case BTA_DM_LE_FEATURES_READ:
{
tBTM_BLE_VSC_CB cmn_vsc_cb;
bt_local_le_features_t local_le_features;
char buf[512];
bt_property_t prop;
prop.type = BT_PROPERTY_LOCAL_LE_FEATURES;
prop.val = (void*)buf;
prop.len = sizeof(buf);
/* LE features are not stored in storage. Should be retrived from stack */
BTM_BleGetVendorCapabilities(&cmn_vsc_cb);
local_le_features.local_privacy_enabled = BTM_BleLocalPrivacyEnabled();
prop.len = sizeof (bt_local_le_features_t);
if (cmn_vsc_cb.filter_support == 1)
local_le_features.max_adv_filter_supported = cmn_vsc_cb.max_filter;
else
local_le_features.max_adv_filter_supported = 0;
local_le_features.max_adv_instance = cmn_vsc_cb.adv_inst_max;
local_le_features.max_irk_list_size = cmn_vsc_cb.max_irk_list_sz;
local_le_features.rpa_offload_supported = cmn_vsc_cb.rpa_offloading;
local_le_features.activity_energy_info_supported = cmn_vsc_cb.energy_support;
local_le_features.scan_result_storage_size = cmn_vsc_cb.tot_scan_results_strg;
local_le_features.version_supported = cmn_vsc_cb.version_supported;
local_le_features.total_trackable_advertisers =
cmn_vsc_cb.total_trackable_advertisers;
local_le_features.extended_scan_support = cmn_vsc_cb.extended_scan_support > 0;
local_le_features.debug_logging_supported = cmn_vsc_cb.debug_logging_supported > 0;
memcpy(prop.val, &local_le_features, prop.len);
HAL_CBACK(bt_hal_cbacks, adapter_properties_cb, BT_STATUS_SUCCESS, 1, &prop);
break;
}
case BTA_DM_ENER_INFO_READ:
{
btif_activity_energy_info_cb_t *p_ener_data = (btif_activity_energy_info_cb_t*) p_param;
bt_activity_energy_info energy_info;
energy_info.status = p_ener_data->status;
energy_info.ctrl_state = p_ener_data->ctrl_state;
energy_info.rx_time = p_ener_data->rx_time;
energy_info.tx_time = p_ener_data->tx_time;
energy_info.idle_time = p_ener_data->idle_time;
energy_info.energy_used = p_ener_data->energy_used;
HAL_CBACK(bt_hal_cbacks, energy_info_cb, &energy_info);
break;
}
#endif
case BTA_DM_AUTHORIZE_EVT:
case BTA_DM_SIG_STRENGTH_EVT:
case BTA_DM_SP_RMT_OOB_EVT:
case BTA_DM_SP_KEYPRESS_EVT:
case BTA_DM_ROLE_CHG_EVT:
default:
BTIF_TRACE_WARNING( "btif_dm_cback : unhandled event (%d)", event );
break;
}
btif_dm_data_free(event, p_data);
}
| 173,436 |
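The functional change in the row above is wrapping the 100 ms sleep in TEMP_FAILURE_RETRY so that the delivered signal from the reported DoS cannot cut the call short with EINTR. A minimal sketch of the retry loop such a macro (a glibc/Bionic convenience) expands to, assuming POSIX usleep semantics:

#include <cerrno>
#include <unistd.h>

template <typename Call>
long RetryOnEintr(Call call) {
  long result;
  do {
    result = static_cast<long>(call());
  } while (result == -1 && errno == EINTR);
  return result;
}

int main() {
  // Sleeps roughly 100 ms even if a signal interrupts the underlying syscall;
  // note the interval restarts from scratch on each retry.
  RetryOnEintr([] { return usleep(100000); });
  return 0;
}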
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static v8::Handle<v8::Value> overloadedMethod7Callback(const v8::Arguments& args)
{
INC_STATS("DOM.TestObj.overloadedMethod7");
if (args.Length() < 1)
return V8Proxy::throwNotEnoughArgumentsError();
TestObj* imp = V8TestObj::toNative(args.Holder());
EXCEPTION_BLOCK(RefPtr<DOMStringList>, arrayArg, v8ValueToWebCoreDOMStringList(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined)));
imp->overloadedMethod(arrayArg);
return v8::Handle<v8::Value>();
}
Commit Message: [V8] Pass Isolate to throwNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=86983
Reviewed by Adam Barth.
The objective is to pass Isolate around in V8 bindings.
This patch passes Isolate to throwNotEnoughArgumentsError().
No tests. No change in behavior.
* bindings/scripts/CodeGeneratorV8.pm:
(GenerateArgumentsCountCheck):
(GenerateEventConstructorCallback):
* bindings/scripts/test/V8/V8Float64Array.cpp:
(WebCore::Float64ArrayV8Internal::fooCallback):
* bindings/scripts/test/V8/V8TestActiveDOMObject.cpp:
(WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback):
(WebCore::TestActiveDOMObjectV8Internal::postMessageCallback):
* bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp:
(WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback):
* bindings/scripts/test/V8/V8TestEventConstructor.cpp:
(WebCore::V8TestEventConstructor::constructorCallback):
* bindings/scripts/test/V8/V8TestEventTarget.cpp:
(WebCore::TestEventTargetV8Internal::itemCallback):
(WebCore::TestEventTargetV8Internal::dispatchEventCallback):
* bindings/scripts/test/V8/V8TestInterface.cpp:
(WebCore::TestInterfaceV8Internal::supplementalMethod2Callback):
(WebCore::V8TestInterface::constructorCallback):
* bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp:
(WebCore::TestMediaQueryListListenerV8Internal::methodCallback):
* bindings/scripts/test/V8/V8TestNamedConstructor.cpp:
(WebCore::V8TestNamedConstructorConstructorCallback):
* bindings/scripts/test/V8/V8TestObj.cpp:
(WebCore::TestObjV8Internal::voidMethodWithArgsCallback):
(WebCore::TestObjV8Internal::intMethodWithArgsCallback):
(WebCore::TestObjV8Internal::objMethodWithArgsCallback):
(WebCore::TestObjV8Internal::methodWithSequenceArgCallback):
(WebCore::TestObjV8Internal::methodReturningSequenceCallback):
(WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback):
(WebCore::TestObjV8Internal::serializedValueCallback):
(WebCore::TestObjV8Internal::idbKeyCallback):
(WebCore::TestObjV8Internal::optionsObjectCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback):
(WebCore::TestObjV8Internal::methodWithCallbackArgCallback):
(WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback):
(WebCore::TestObjV8Internal::overloadedMethod1Callback):
(WebCore::TestObjV8Internal::overloadedMethod2Callback):
(WebCore::TestObjV8Internal::overloadedMethod3Callback):
(WebCore::TestObjV8Internal::overloadedMethod4Callback):
(WebCore::TestObjV8Internal::overloadedMethod5Callback):
(WebCore::TestObjV8Internal::overloadedMethod6Callback):
(WebCore::TestObjV8Internal::overloadedMethod7Callback):
(WebCore::TestObjV8Internal::overloadedMethod11Callback):
(WebCore::TestObjV8Internal::overloadedMethod12Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback):
(WebCore::TestObjV8Internal::convert1Callback):
(WebCore::TestObjV8Internal::convert2Callback):
(WebCore::TestObjV8Internal::convert3Callback):
(WebCore::TestObjV8Internal::convert4Callback):
(WebCore::TestObjV8Internal::convert5Callback):
(WebCore::TestObjV8Internal::strictFunctionCallback):
(WebCore::V8TestObj::constructorCallback):
* bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp:
(WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback):
(WebCore::V8TestSerializedScriptValueInterface::constructorCallback):
* bindings/v8/ScriptController.cpp:
(WebCore::setValueAndClosePopupCallback):
* bindings/v8/V8Proxy.cpp:
(WebCore::V8Proxy::throwNotEnoughArgumentsError):
* bindings/v8/V8Proxy.h:
(V8Proxy):
* bindings/v8/custom/V8AudioContextCustom.cpp:
(WebCore::V8AudioContext::constructorCallback):
* bindings/v8/custom/V8DataViewCustom.cpp:
(WebCore::V8DataView::getInt8Callback):
(WebCore::V8DataView::getUint8Callback):
(WebCore::V8DataView::setInt8Callback):
(WebCore::V8DataView::setUint8Callback):
* bindings/v8/custom/V8DirectoryEntryCustom.cpp:
(WebCore::V8DirectoryEntry::getDirectoryCallback):
(WebCore::V8DirectoryEntry::getFileCallback):
* bindings/v8/custom/V8IntentConstructor.cpp:
(WebCore::V8Intent::constructorCallback):
* bindings/v8/custom/V8SVGLengthCustom.cpp:
(WebCore::V8SVGLength::convertToSpecifiedUnitsCallback):
* bindings/v8/custom/V8WebGLRenderingContextCustom.cpp:
(WebCore::getObjectParameter):
(WebCore::V8WebGLRenderingContext::getAttachedShadersCallback):
(WebCore::V8WebGLRenderingContext::getExtensionCallback):
(WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback):
(WebCore::V8WebGLRenderingContext::getParameterCallback):
(WebCore::V8WebGLRenderingContext::getProgramParameterCallback):
(WebCore::V8WebGLRenderingContext::getShaderParameterCallback):
(WebCore::V8WebGLRenderingContext::getUniformCallback):
(WebCore::vertexAttribAndUniformHelperf):
(WebCore::uniformHelperi):
(WebCore::uniformMatrixHelper):
* bindings/v8/custom/V8WebKitMutationObserverCustom.cpp:
(WebCore::V8WebKitMutationObserver::constructorCallback):
(WebCore::V8WebKitMutationObserver::observeCallback):
* bindings/v8/custom/V8WebSocketCustom.cpp:
(WebCore::V8WebSocket::constructorCallback):
(WebCore::V8WebSocket::sendCallback):
* bindings/v8/custom/V8XMLHttpRequestCustom.cpp:
(WebCore::V8XMLHttpRequest::openCallback):
git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: | static v8::Handle<v8::Value> overloadedMethod7Callback(const v8::Arguments& args)
{
INC_STATS("DOM.TestObj.overloadedMethod7");
if (args.Length() < 1)
return V8Proxy::throwNotEnoughArgumentsError(args.GetIsolate());
TestObj* imp = V8TestObj::toNative(args.Holder());
EXCEPTION_BLOCK(RefPtr<DOMStringList>, arrayArg, v8ValueToWebCoreDOMStringList(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined)));
imp->overloadedMethod(arrayArg);
return v8::Handle<v8::Value>();
}
| 171,103 |
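The change in the row above is mechanical, but it shows a common refactoring shape: an error helper that used to locate its execution context implicitly now receives it from the caller, which already holds it. A generic, hypothetical sketch of that API movement (plain structs, not the actual V8 types):

#include <cassert>

struct Isolate { int id; };

// After the refactoring the helper takes the isolate explicitly; before, it
// would have had to reach for some process-wide "current isolate" accessor.
void ThrowNotEnoughArguments(Isolate* isolate) {
  assert(isolate != nullptr);
  // ... construct and throw the exception inside *isolate ...
}

void Callback(Isolate* isolate, int arg_count) {
  if (arg_count < 1)
    ThrowNotEnoughArguments(isolate);  // pass along what the callback has
}

int main() {
  Isolate isolate{1};
  Callback(&isolate, 0);
  return 0;
}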
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: IDNSpoofChecker::IDNSpoofChecker() {
UErrorCode status = U_ZERO_ERROR;
checker_ = uspoof_open(&status);
if (U_FAILURE(status)) {
checker_ = nullptr;
return;
}
uspoof_setRestrictionLevel(checker_, USPOOF_MODERATELY_RESTRICTIVE);
SetAllowedUnicodeSet(&status);
int32_t checks = uspoof_getChecks(checker_, &status) | USPOOF_AUX_INFO;
uspoof_setChecks(checker_, checks, &status);
deviation_characters_ =
icu::UnicodeSet(UNICODE_STRING_SIMPLE("[\\u00df\\u03c2\\u200c\\u200d]"),
status);
deviation_characters_.freeze();
non_ascii_latin_letters_ = icu::UnicodeSet(
UNICODE_STRING_SIMPLE("[[:Latin:] - [a-zA-Z]]"), status);
non_ascii_latin_letters_.freeze();
kana_letters_exceptions_ = icu::UnicodeSet(UNICODE_STRING_SIMPLE(
"[\\u3078-\\u307a\\u30d8-\\u30da\\u30fb\\u30fc]"), status);
kana_letters_exceptions_.freeze();
DCHECK(U_SUCCESS(status));
}
Commit Message: Block domain labels made of Cyrillic letters that look alike Latin
Block a label made entirely of Latin-look-alike Cyrillic letters when the TLD is not an IDN (i.e. this check is ON only for TLDs like 'com', 'net', 'uk', but not applied for IDN TLDs like рф.
BUG=683314
TEST=components_unittests --gtest_filter=U*IDN*
Review-Url: https://codereview.chromium.org/2683793010
Cr-Commit-Position: refs/heads/master@{#459226}
CWE ID: CWE-20 | IDNSpoofChecker::IDNSpoofChecker() {
UErrorCode status = U_ZERO_ERROR;
checker_ = uspoof_open(&status);
if (U_FAILURE(status)) {
checker_ = nullptr;
return;
}
uspoof_setRestrictionLevel(checker_, USPOOF_MODERATELY_RESTRICTIVE);
SetAllowedUnicodeSet(&status);
int32_t checks = uspoof_getChecks(checker_, &status) | USPOOF_AUX_INFO;
uspoof_setChecks(checker_, checks, &status);
deviation_characters_ =
icu::UnicodeSet(UNICODE_STRING_SIMPLE("[\\u00df\\u03c2\\u200c\\u200d]"),
status);
deviation_characters_.freeze();
non_ascii_latin_letters_ = icu::UnicodeSet(
UNICODE_STRING_SIMPLE("[[:Latin:] - [a-zA-Z]]"), status);
non_ascii_latin_letters_.freeze();
kana_letters_exceptions_ = icu::UnicodeSet(UNICODE_STRING_SIMPLE(
"[\\u3078-\\u307a\\u30d8-\\u30da\\u30fb\\u30fc]"), status);
kana_letters_exceptions_.freeze();
// These Cyrillic letters look like Latin. A domain label entirely made of
// these letters is blocked as a simpliified whole-script-spoofable.
cyrillic_letters_latin_alike_ =
icu::UnicodeSet(icu::UnicodeString("[асԁеһіјӏорԛѕԝхуъЬҽпгѵѡ]"), status);
cyrillic_letters_latin_alike_.freeze();
cyrillic_letters_ =
icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Cyrl:]]"), status);
cyrillic_letters_.freeze();
DCHECK(U_SUCCESS(status));
}
| 172,389 |
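The rule the commit in the row above adds can be approximated without ICU: if every code point of a label belongs to the small set of Cyrillic letters that render like Latin ones, and the TLD is plain ASCII rather than an IDN, the label should be shown as punycode. A simplified, hypothetical sketch with a hard-coded subset of that character set (the real check uses ICU UnicodeSets and covers more letters):

#include <set>
#include <string>

bool IsAllLatinLookalikeCyrillic(const std::u32string& label) {
  // A few of the letters in question: а, е, о, р, с, у, х, і, ј, ӏ.
  static const std::set<char32_t> kLookalikes = {
      U'\u0430', U'\u0435', U'\u043E', U'\u0440', U'\u0441',
      U'\u0443', U'\u0445', U'\u0456', U'\u0458', U'\u04CF'};
  if (label.empty()) return false;
  for (char32_t c : label)
    if (kLookalikes.count(c) == 0) return false;
  return true;
}

bool ShouldBlockLabel(const std::u32string& label, bool tld_is_ascii) {
  return tld_is_ascii && IsAllLatinLookalikeCyrillic(label);
}

int main() {
  // Cyrillic "аррӏе" under an ASCII TLD such as .com gets blocked.
  std::u32string label = U"\u0430\u0440\u0440\u04CF\u0435";
  return ShouldBlockLabel(label, true) ? 0 : 1;
}

A label containing even one letter outside the look-alike set, or appearing under a Cyrillic TLD such as рф, falls through to the normal checks, which keeps false positives low for legitimate Cyrillic domains.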
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void CloudPolicyController::SetState(
CloudPolicyController::ControllerState new_state) {
state_ = new_state;
backend_.reset(); // Discard any pending requests.
base::Time now(base::Time::NowFromSystemTime());
base::Time refresh_at;
base::Time last_refresh(cache_->last_policy_refresh_time());
if (last_refresh.is_null())
last_refresh = now;
bool inform_notifier_done = false;
switch (state_) {
case STATE_TOKEN_UNMANAGED:
notifier_->Inform(CloudPolicySubsystem::UNMANAGED,
CloudPolicySubsystem::NO_DETAILS,
PolicyNotifier::POLICY_CONTROLLER);
break;
case STATE_TOKEN_UNAVAILABLE:
case STATE_TOKEN_VALID:
refresh_at = now;
break;
case STATE_POLICY_VALID:
effective_policy_refresh_error_delay_ms_ =
kPolicyRefreshErrorDelayInMilliseconds;
refresh_at =
last_refresh + base::TimeDelta::FromMilliseconds(GetRefreshDelay());
notifier_->Inform(CloudPolicySubsystem::SUCCESS,
CloudPolicySubsystem::NO_DETAILS,
PolicyNotifier::POLICY_CONTROLLER);
break;
case STATE_TOKEN_ERROR:
notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,
CloudPolicySubsystem::BAD_DMTOKEN,
PolicyNotifier::POLICY_CONTROLLER);
inform_notifier_done = true;
case STATE_POLICY_ERROR:
if (!inform_notifier_done) {
notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,
CloudPolicySubsystem::POLICY_NETWORK_ERROR,
PolicyNotifier::POLICY_CONTROLLER);
}
refresh_at = now + base::TimeDelta::FromMilliseconds(
effective_policy_refresh_error_delay_ms_);
effective_policy_refresh_error_delay_ms_ =
std::min(effective_policy_refresh_error_delay_ms_ * 2,
policy_refresh_rate_ms_);
break;
case STATE_POLICY_UNAVAILABLE:
effective_policy_refresh_error_delay_ms_ = policy_refresh_rate_ms_;
refresh_at = now + base::TimeDelta::FromMilliseconds(
effective_policy_refresh_error_delay_ms_);
notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,
CloudPolicySubsystem::POLICY_NETWORK_ERROR,
PolicyNotifier::POLICY_CONTROLLER);
break;
}
scheduler_->CancelDelayedWork();
if (!refresh_at.is_null()) {
int64 delay = std::max<int64>((refresh_at - now).InMilliseconds(), 0);
scheduler_->PostDelayedWork(
base::Bind(&CloudPolicyController::DoWork, base::Unretained(this)),
delay);
}
}
Commit Message: Reset the device policy machinery upon retrying enrollment.
BUG=chromium-os:18208
TEST=See bug description
Review URL: http://codereview.chromium.org/7676005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@97615 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void CloudPolicyController::SetState(
CloudPolicyController::ControllerState new_state) {
state_ = new_state;
backend_.reset(); // Stop any pending requests.
base::Time now(base::Time::NowFromSystemTime());
base::Time refresh_at;
base::Time last_refresh(cache_->last_policy_refresh_time());
if (last_refresh.is_null())
last_refresh = now;
bool inform_notifier_done = false;
switch (state_) {
case STATE_TOKEN_UNMANAGED:
notifier_->Inform(CloudPolicySubsystem::UNMANAGED,
CloudPolicySubsystem::NO_DETAILS,
PolicyNotifier::POLICY_CONTROLLER);
break;
case STATE_TOKEN_UNAVAILABLE:
case STATE_TOKEN_VALID:
refresh_at = now;
break;
case STATE_POLICY_VALID:
effective_policy_refresh_error_delay_ms_ =
kPolicyRefreshErrorDelayInMilliseconds;
refresh_at =
last_refresh + base::TimeDelta::FromMilliseconds(GetRefreshDelay());
notifier_->Inform(CloudPolicySubsystem::SUCCESS,
CloudPolicySubsystem::NO_DETAILS,
PolicyNotifier::POLICY_CONTROLLER);
break;
case STATE_TOKEN_ERROR:
notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,
CloudPolicySubsystem::BAD_DMTOKEN,
PolicyNotifier::POLICY_CONTROLLER);
inform_notifier_done = true;
case STATE_POLICY_ERROR:
if (!inform_notifier_done) {
notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,
CloudPolicySubsystem::POLICY_NETWORK_ERROR,
PolicyNotifier::POLICY_CONTROLLER);
}
refresh_at = now + base::TimeDelta::FromMilliseconds(
effective_policy_refresh_error_delay_ms_);
effective_policy_refresh_error_delay_ms_ =
std::min(effective_policy_refresh_error_delay_ms_ * 2,
policy_refresh_rate_ms_);
break;
case STATE_POLICY_UNAVAILABLE:
effective_policy_refresh_error_delay_ms_ = policy_refresh_rate_ms_;
refresh_at = now + base::TimeDelta::FromMilliseconds(
effective_policy_refresh_error_delay_ms_);
notifier_->Inform(CloudPolicySubsystem::NETWORK_ERROR,
CloudPolicySubsystem::POLICY_NETWORK_ERROR,
PolicyNotifier::POLICY_CONTROLLER);
break;
}
scheduler_->CancelDelayedWork();
if (!refresh_at.is_null()) {
int64 delay = std::max<int64>((refresh_at - now).InMilliseconds(), 0);
scheduler_->PostDelayedWork(
base::Bind(&CloudPolicyController::DoWork, base::Unretained(this)),
delay);
}
}
| 170,281 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void ExtensionTtsController::SpeakOrEnqueue(Utterance* utterance) {
if (IsSpeaking() && utterance->can_enqueue()) {
utterance_queue_.push(utterance);
} else {
Stop();
SpeakNow(utterance);
}
}
Commit Message: Extend TTS extension API to support richer events returned from the engine
to the client. Previously we just had a completed event; this adds start,
word boundary, sentence boundary, and marker boundary. In addition,
interrupted and canceled, which were previously errors, now become events.
Mac and Windows implementations extended to support as many of these events
as possible.
BUG=67713
BUG=70198
BUG=75106
BUG=83404
TEST=Updates all TTS API tests to be event-based, and adds new tests.
Review URL: http://codereview.chromium.org/6792014
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@91665 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-20 | void ExtensionTtsController::SpeakOrEnqueue(Utterance* utterance) {
std::string gender;
if (options->HasKey(constants::kGenderKey))
EXTENSION_FUNCTION_VALIDATE(
options->GetString(constants::kGenderKey, &gender));
if (!gender.empty() &&
gender != constants::kGenderFemale &&
gender != constants::kGenderMale) {
error_ = constants::kErrorInvalidGender;
return false;
}
| 170,389 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void _xml_add_to_info(xml_parser *parser,char *name)
{
zval **element, *values;
if (! parser->info) {
return;
}
if (zend_hash_find(Z_ARRVAL_P(parser->info),name,strlen(name) + 1,(void **) &element) == FAILURE) {
MAKE_STD_ZVAL(values);
array_init(values);
zend_hash_update(Z_ARRVAL_P(parser->info), name, strlen(name)+1, (void *) &values, sizeof(zval*), (void **) &element);
}
add_next_index_long(*element,parser->curtag);
parser->curtag++;
}
Commit Message:
CWE ID: CWE-119 | static void _xml_add_to_info(xml_parser *parser,char *name)
{
zval **element, *values;
if (! parser->info) {
return;
}
if (zend_hash_find(Z_ARRVAL_P(parser->info),name,strlen(name) + 1,(void **) &element) == FAILURE) {
MAKE_STD_ZVAL(values);
array_init(values);
zend_hash_update(Z_ARRVAL_P(parser->info), name, strlen(name)+1, (void *) &values, sizeof(zval*), (void **) &element);
}
add_next_index_long(*element,parser->curtag);
parser->curtag++;
}
| 165,039 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
struct nameidata *nd)
{
int xid;
int rc = 0; /* to get around spurious gcc warning, set to zero here */
__u32 oplock = enable_oplocks ? REQ_OPLOCK : 0;
__u16 fileHandle = 0;
bool posix_open = false;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
struct cifsFileInfo *cfile;
struct inode *newInode = NULL;
char *full_path = NULL;
struct file *filp;
xid = GetXid();
cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p",
parent_dir_inode, direntry->d_name.name, direntry);
/* check whether path exists */
cifs_sb = CIFS_SB(parent_dir_inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
FreeXid(xid);
return (struct dentry *)tlink;
}
pTcon = tlink_tcon(tlink);
/*
* Don't allow the separator character in a path component.
* The VFS will not allow "/", but "\" is allowed by posix.
*/
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
int i;
for (i = 0; i < direntry->d_name.len; i++)
if (direntry->d_name.name[i] == '\\') {
cFYI(1, "Invalid file name");
rc = -EINVAL;
goto lookup_out;
}
}
/*
* O_EXCL: optimize away the lookup, but don't hash the dentry. Let
* the VFS handle the create.
*/
if (nd && (nd->flags & LOOKUP_EXCL)) {
d_instantiate(direntry, NULL);
rc = 0;
goto lookup_out;
}
/* can not grab the rename sem here since it would
deadlock in the cases (beginning of sys_rename itself)
in which we already have the sb rename sem */
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
goto lookup_out;
}
if (direntry->d_inode != NULL) {
cFYI(1, "non-NULL inode in lookup");
} else {
cFYI(1, "NULL inode in lookup");
}
cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode);
/* Posix open is only called (at lookup time) for file create now.
* For opens (rather than creates), because we do not know if it
* is a file or directory yet, and current Samba no longer allows
* us to do posix open on dirs, we could end up wasting an open call
* on what turns out to be a dir. For file opens, we wait to call posix
* open till cifs_open. It could be added here (lookup) in the future
* but the performance tradeoff of the extra network request when EISDIR
* or EACCES is returned would have to be weighed against the 50%
* reduction in network traffic in the other paths.
*/
if (pTcon->unix_ext) {
if (nd && !(nd->flags & LOOKUP_DIRECTORY) &&
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
(nd->intent.open.file->f_flags & O_CREAT)) {
rc = cifs_posix_open(full_path, &newInode,
parent_dir_inode->i_sb,
nd->intent.open.create_mode,
nd->intent.open.file->f_flags, &oplock,
&fileHandle, xid);
/*
* The check below works around a bug in POSIX
* open in samba versions 3.3.1 and earlier where
* open could incorrectly fail with invalid parameter.
* If either that or op not supported returned, follow
* the normal lookup.
*/
if ((rc == 0) || (rc == -ENOENT))
posix_open = true;
else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
pTcon->broken_posix_open = true;
}
if (!posix_open)
rc = cifs_get_inode_info_unix(&newInode, full_path,
parent_dir_inode->i_sb, xid);
} else
rc = cifs_get_inode_info(&newInode, full_path, NULL,
parent_dir_inode->i_sb, xid, NULL);
if ((rc == 0) && (newInode != NULL)) {
d_add(direntry, newInode);
if (posix_open) {
filp = lookup_instantiate_filp(nd, direntry,
generic_file_open);
if (IS_ERR(filp)) {
rc = PTR_ERR(filp);
CIFSSMBClose(xid, pTcon, fileHandle);
goto lookup_out;
}
cfile = cifs_new_fileinfo(fileHandle, filp, tlink,
oplock);
if (cfile == NULL) {
fput(filp);
CIFSSMBClose(xid, pTcon, fileHandle);
rc = -ENOMEM;
goto lookup_out;
}
}
/* since paths are not looked up by component - the parent
directories are presumed to be good here */
renew_parental_timestamps(direntry);
} else if (rc == -ENOENT) {
rc = 0;
direntry->d_time = jiffies;
d_add(direntry, NULL);
/* if it was once a directory (but how can we tell?) we could do
shrink_dcache_parent(direntry); */
} else if (rc != -EACCES) {
cERROR(1, "Unexpected lookup error %d", rc);
/* We special case check for Access Denied - since that
is a common return code */
}
lookup_out:
kfree(full_path);
cifs_put_tlink(tlink);
FreeXid(xid);
return ERR_PTR(rc);
}
Commit Message: cifs: fix dentry refcount leak when opening a FIFO on lookup
commit 5bccda0ebc7c0331b81ac47d39e4b920b198b2cd upstream.
The cifs code will attempt to open files on lookup under certain
circumstances. What happens though if we find that the file we opened
was actually a FIFO or other special file?
Currently, the open filehandle just ends up being leaked leading to
a dentry refcount mismatch and oops on umount. Fix this by having the
code close the filehandle on the server if it turns out not to be a
regular file. While we're at it, change this spaghetti if statement
into a switch too.
Reported-by: CAI Qian <[email protected]>
Tested-by: CAI Qian <[email protected]>
Reviewed-by: Shirish Pargaonkar <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
CWE ID: CWE-264 | cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
struct nameidata *nd)
{
int xid;
int rc = 0; /* to get around spurious gcc warning, set to zero here */
__u32 oplock = enable_oplocks ? REQ_OPLOCK : 0;
__u16 fileHandle = 0;
bool posix_open = false;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
struct cifsFileInfo *cfile;
struct inode *newInode = NULL;
char *full_path = NULL;
struct file *filp;
xid = GetXid();
cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p",
parent_dir_inode, direntry->d_name.name, direntry);
/* check whether path exists */
cifs_sb = CIFS_SB(parent_dir_inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
FreeXid(xid);
return (struct dentry *)tlink;
}
pTcon = tlink_tcon(tlink);
/*
* Don't allow the separator character in a path component.
* The VFS will not allow "/", but "\" is allowed by posix.
*/
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
int i;
for (i = 0; i < direntry->d_name.len; i++)
if (direntry->d_name.name[i] == '\\') {
cFYI(1, "Invalid file name");
rc = -EINVAL;
goto lookup_out;
}
}
/*
* O_EXCL: optimize away the lookup, but don't hash the dentry. Let
* the VFS handle the create.
*/
if (nd && (nd->flags & LOOKUP_EXCL)) {
d_instantiate(direntry, NULL);
rc = 0;
goto lookup_out;
}
/* can not grab the rename sem here since it would
deadlock in the cases (beginning of sys_rename itself)
in which we already have the sb rename sem */
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
goto lookup_out;
}
if (direntry->d_inode != NULL) {
cFYI(1, "non-NULL inode in lookup");
} else {
cFYI(1, "NULL inode in lookup");
}
cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode);
/* Posix open is only called (at lookup time) for file create now.
* For opens (rather than creates), because we do not know if it
* is a file or directory yet, and current Samba no longer allows
* us to do posix open on dirs, we could end up wasting an open call
* on what turns out to be a dir. For file opens, we wait to call posix
* open till cifs_open. It could be added here (lookup) in the future
* but the performance tradeoff of the extra network request when EISDIR
* or EACCES is returned would have to be weighed against the 50%
* reduction in network traffic in the other paths.
*/
if (pTcon->unix_ext) {
if (nd && !(nd->flags & LOOKUP_DIRECTORY) &&
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
(nd->intent.open.file->f_flags & O_CREAT)) {
rc = cifs_posix_open(full_path, &newInode,
parent_dir_inode->i_sb,
nd->intent.open.create_mode,
nd->intent.open.file->f_flags, &oplock,
&fileHandle, xid);
/*
* The check below works around a bug in POSIX
* open in samba versions 3.3.1 and earlier where
* open could incorrectly fail with invalid parameter.
* If either that or op not supported returned, follow
* the normal lookup.
*/
switch (rc) {
case 0:
/*
* The server may allow us to open things like
* FIFOs, but the client isn't set up to deal
* with that. If it's not a regular file, just
* close it and proceed as if it were a normal
* lookup.
*/
if (newInode && !S_ISREG(newInode->i_mode)) {
CIFSSMBClose(xid, pTcon, fileHandle);
break;
}
case -ENOENT:
posix_open = true;
case -EOPNOTSUPP:
break;
default:
pTcon->broken_posix_open = true;
}
}
if (!posix_open)
rc = cifs_get_inode_info_unix(&newInode, full_path,
parent_dir_inode->i_sb, xid);
} else
rc = cifs_get_inode_info(&newInode, full_path, NULL,
parent_dir_inode->i_sb, xid, NULL);
if ((rc == 0) && (newInode != NULL)) {
d_add(direntry, newInode);
if (posix_open) {
filp = lookup_instantiate_filp(nd, direntry,
generic_file_open);
if (IS_ERR(filp)) {
rc = PTR_ERR(filp);
CIFSSMBClose(xid, pTcon, fileHandle);
goto lookup_out;
}
cfile = cifs_new_fileinfo(fileHandle, filp, tlink,
oplock);
if (cfile == NULL) {
fput(filp);
CIFSSMBClose(xid, pTcon, fileHandle);
rc = -ENOMEM;
goto lookup_out;
}
}
/* since paths are not looked up by component - the parent
directories are presumed to be good here */
renew_parental_timestamps(direntry);
} else if (rc == -ENOENT) {
rc = 0;
direntry->d_time = jiffies;
d_add(direntry, NULL);
/* if it was once a directory (but how can we tell?) we could do
shrink_dcache_parent(direntry); */
} else if (rc != -EACCES) {
cERROR(1, "Unexpected lookup error %d", rc);
/* We special case check for Access Denied - since that
is a common return code */
}
lookup_out:
kfree(full_path);
cifs_put_tlink(tlink);
FreeXid(xid);
return ERR_PTR(rc);
}
| 165,645 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: ikev1_t_print(netdissect_options *ndo, u_char tpay _U_,
const struct isakmp_gen *ext, u_int item_len,
const u_char *ep, uint32_t phase _U_, uint32_t doi _U_,
uint32_t proto, int depth _U_)
{
const struct ikev1_pl_t *p;
struct ikev1_pl_t t;
const u_char *cp;
const char *idstr;
const struct attrmap *map;
size_t nmap;
const u_char *ep2;
ND_PRINT((ndo,"%s:", NPSTR(ISAKMP_NPTYPE_T)));
p = (const struct ikev1_pl_t *)ext;
ND_TCHECK(*p);
UNALIGNED_MEMCPY(&t, ext, sizeof(t));
switch (proto) {
case 1:
idstr = STR_OR_ID(t.t_id, ikev1_p_map);
map = oakley_t_map;
nmap = sizeof(oakley_t_map)/sizeof(oakley_t_map[0]);
break;
case 2:
idstr = STR_OR_ID(t.t_id, ah_p_map);
map = ipsec_t_map;
nmap = sizeof(ipsec_t_map)/sizeof(ipsec_t_map[0]);
break;
case 3:
idstr = STR_OR_ID(t.t_id, esp_p_map);
map = ipsec_t_map;
nmap = sizeof(ipsec_t_map)/sizeof(ipsec_t_map[0]);
break;
case 4:
idstr = STR_OR_ID(t.t_id, ipcomp_p_map);
map = ipsec_t_map;
nmap = sizeof(ipsec_t_map)/sizeof(ipsec_t_map[0]);
break;
default:
idstr = NULL;
map = NULL;
nmap = 0;
break;
}
if (idstr)
ND_PRINT((ndo," #%d id=%s ", t.t_no, idstr));
else
ND_PRINT((ndo," #%d id=%d ", t.t_no, t.t_id));
cp = (const u_char *)(p + 1);
ep2 = (const u_char *)p + item_len;
while (cp < ep && cp < ep2) {
if (map && nmap) {
cp = ikev1_attrmap_print(ndo, cp, (ep < ep2) ? ep : ep2,
map, nmap);
} else
cp = ikev1_attr_print(ndo, cp, (ep < ep2) ? ep : ep2);
}
if (ep < ep2)
ND_PRINT((ndo,"..."));
return cp;
trunc:
ND_PRINT((ndo," [|%s]", NPSTR(ISAKMP_NPTYPE_T)));
return NULL;
}
Commit Message: CVE-2017-13039/IKEv1: Do more bounds checking.
Have ikev1_attrmap_print() and ikev1_attr_print() do full bounds
checking, and return null on a bounds overflow. Have their callers
check for a null return.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s), modified
so the capture file won't be rejected as an invalid capture.
CWE ID: CWE-125 | ikev1_t_print(netdissect_options *ndo, u_char tpay _U_,
const struct isakmp_gen *ext, u_int item_len,
const u_char *ep, uint32_t phase _U_, uint32_t doi _U_,
uint32_t proto, int depth _U_)
{
const struct ikev1_pl_t *p;
struct ikev1_pl_t t;
const u_char *cp;
const char *idstr;
const struct attrmap *map;
size_t nmap;
const u_char *ep2;
ND_PRINT((ndo,"%s:", NPSTR(ISAKMP_NPTYPE_T)));
p = (const struct ikev1_pl_t *)ext;
ND_TCHECK(*p);
UNALIGNED_MEMCPY(&t, ext, sizeof(t));
switch (proto) {
case 1:
idstr = STR_OR_ID(t.t_id, ikev1_p_map);
map = oakley_t_map;
nmap = sizeof(oakley_t_map)/sizeof(oakley_t_map[0]);
break;
case 2:
idstr = STR_OR_ID(t.t_id, ah_p_map);
map = ipsec_t_map;
nmap = sizeof(ipsec_t_map)/sizeof(ipsec_t_map[0]);
break;
case 3:
idstr = STR_OR_ID(t.t_id, esp_p_map);
map = ipsec_t_map;
nmap = sizeof(ipsec_t_map)/sizeof(ipsec_t_map[0]);
break;
case 4:
idstr = STR_OR_ID(t.t_id, ipcomp_p_map);
map = ipsec_t_map;
nmap = sizeof(ipsec_t_map)/sizeof(ipsec_t_map[0]);
break;
default:
idstr = NULL;
map = NULL;
nmap = 0;
break;
}
if (idstr)
ND_PRINT((ndo," #%d id=%s ", t.t_no, idstr));
else
ND_PRINT((ndo," #%d id=%d ", t.t_no, t.t_id));
cp = (const u_char *)(p + 1);
ep2 = (const u_char *)p + item_len;
while (cp < ep && cp < ep2) {
if (map && nmap)
cp = ikev1_attrmap_print(ndo, cp, ep2, map, nmap);
else
cp = ikev1_attr_print(ndo, cp, ep2);
if (cp == NULL)
goto trunc;
}
if (ep < ep2)
ND_PRINT((ndo,"..."));
return cp;
trunc:
ND_PRINT((ndo," [|%s]", NPSTR(ISAKMP_NPTYPE_T)));
return NULL;
}
| 167,842 |
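The fixed ikev1_t_print() above shows only the caller side of this commit: the loop now treats a NULL return from the attribute printers as truncation. The bounds checking added inside ikev1_attrmap_print()/ikev1_attr_print() themselves is not part of this row, so the following is a minimal sketch of that contract rather than tcpdump's actual code; the function name and the exact checks are assumptions, and real tcpdump uses its ND_TCHECK-style macros instead of raw pointer arithmetic.
/* Sketch: an IKEv1 attribute printer that returns NULL instead of reading
 * past the end of the available data (ep2).  TV attributes (high bit of the
 * type set) carry their value inside the 4-byte header; TLV attributes are
 * followed by a value of the indicated length. */
static const unsigned char *
ikev1_attr_print_sketch(const unsigned char *p, const unsigned char *ep2)
{
        unsigned short type, len;

        if (ep2 - p < 4)                /* no room for the attribute header */
                return NULL;            /* caller jumps to its trunc label  */
        type = (unsigned short)((p[0] << 8) | p[1]);
        len  = (unsigned short)((p[2] << 8) | p[3]);
        if (type & 0x8000)              /* TV format: value is in the header */
                return p + 4;
        if (ep2 - p - 4 < len)          /* TLV format: value would overflow  */
                return NULL;
        return p + 4 + len;             /* advance past type, length, value  */
}
With that convention, the while loop in the fixed function stops at the first short attribute instead of walking off the end of the buffer.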
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
int error = 0;
struct cxio_rdev *rdev;
rdev = (struct cxio_rdev *)tdev->ulp;
if (cxio_fatal_error(rdev)) {
kfree_skb(skb);
return -EIO;
}
error = cxgb3_ofld_send(tdev, skb);
if (error < 0)
kfree_skb(skb);
return error;
}
Commit Message: iw_cxgb3: Fix incorrectly returning error on success
The cxgb3_*_send() functions return NET_XMIT_ values, which are
positive integers values. So don't treat positive return values
as an error.
Signed-off-by: Steve Wise <[email protected]>
Signed-off-by: Hariprasad Shenai <[email protected]>
Signed-off-by: Doug Ledford <[email protected]>
CWE ID: | int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
int error = 0;
struct cxio_rdev *rdev;
rdev = (struct cxio_rdev *)tdev->ulp;
if (cxio_fatal_error(rdev)) {
kfree_skb(skb);
return -EIO;
}
error = cxgb3_ofld_send(tdev, skb);
if (error < 0)
kfree_skb(skb);
return error < 0 ? error : 0;
}
| 167,495 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: ContentEncoding::~ContentEncoding() {
ContentCompression** comp_i = compression_entries_;
ContentCompression** const comp_j = compression_entries_end_;
while (comp_i != comp_j) {
ContentCompression* const comp = *comp_i++;
delete comp;
}
delete [] compression_entries_;
ContentEncryption** enc_i = encryption_entries_;
ContentEncryption** const enc_j = encryption_entries_end_;
while (enc_i != enc_j) {
ContentEncryption* const enc = *enc_i++;
delete enc;
}
delete [] encryption_entries_;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | ContentEncoding::~ContentEncoding() {
ContentCompression** comp_i = compression_entries_;
ContentCompression** const comp_j = compression_entries_end_;
while (comp_i != comp_j) {
ContentCompression* const comp = *comp_i++;
delete comp;
}
delete[] compression_entries_;
ContentEncryption** enc_i = encryption_entries_;
ContentEncryption** const enc_j = encryption_entries_end_;
while (enc_i != enc_j) {
ContentEncryption* const enc = *enc_i++;
delete enc;
}
delete[] encryption_entries_;
}
| 174,460 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_getaclres res = {
.acl_len = buflen,
};
void *resp_buf;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
.rpc_resp = &res,
};
int ret = -ENOMEM, npages, i, acl_len = 0;
npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
if (npages == 0)
npages = 1;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i])
goto out_free;
}
if (npages > 1) {
/* for decoding across pages */
res.acl_scratch = alloc_page(GFP_KERNEL);
if (!res.acl_scratch)
goto out_free;
}
args.acl_len = npages * PAGE_SIZE;
args.acl_pgbase = 0;
/* Let decode_getfacl know not to fail if the ACL data is larger than
* the page we send as a guess */
if (buf == NULL)
res.acl_flags |= NFS4_ACL_LEN_REQUEST;
resp_buf = page_address(pages[0]);
dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
__func__, buf, buflen, npages, args.acl_len);
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
&msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
acl_len = res.acl_len - res.acl_data_offset;
if (acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, acl_len);
else
nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
acl_len);
if (buf) {
ret = -ERANGE;
if (acl_len > buflen)
goto out_free;
_copy_from_pages(buf, pages, res.acl_data_offset,
res.acl_len);
}
ret = acl_len;
out_free:
for (i = 0; i < npages; i++)
if (pages[i])
__free_page(pages[i]);
if (res.acl_scratch)
__free_page(res.acl_scratch);
return ret;
}
Commit Message: Fix length of buffer copied in __nfs4_get_acl_uncached
_copy_from_pages(), used to copy data from the temporary buffer to the
user-passed buffer, is passed the wrong size parameter when copying
data. res.acl_len contains both the bitmap and acl lengths, while
acl_len contains the acl length after adjusting for the bitmap size.
Signed-off-by: Sachin Prabhu <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
CWE ID: CWE-189 | static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_getaclres res = {
.acl_len = buflen,
};
void *resp_buf;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
.rpc_resp = &res,
};
int ret = -ENOMEM, npages, i, acl_len = 0;
npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
if (npages == 0)
npages = 1;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i])
goto out_free;
}
if (npages > 1) {
/* for decoding across pages */
res.acl_scratch = alloc_page(GFP_KERNEL);
if (!res.acl_scratch)
goto out_free;
}
args.acl_len = npages * PAGE_SIZE;
args.acl_pgbase = 0;
/* Let decode_getfacl know not to fail if the ACL data is larger than
* the page we send as a guess */
if (buf == NULL)
res.acl_flags |= NFS4_ACL_LEN_REQUEST;
resp_buf = page_address(pages[0]);
dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
__func__, buf, buflen, npages, args.acl_len);
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
&msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
acl_len = res.acl_len - res.acl_data_offset;
if (acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, acl_len);
else
nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
acl_len);
if (buf) {
ret = -ERANGE;
if (acl_len > buflen)
goto out_free;
_copy_from_pages(buf, pages, res.acl_data_offset,
acl_len);
}
ret = acl_len;
out_free:
for (i = 0; i < npages; i++)
if (pages[i])
__free_page(pages[i]);
if (res.acl_scratch)
__free_page(res.acl_scratch);
return ret;
}
| 165,598 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static Image *ReadTXTImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
colorspace[MaxTextExtent],
text[MaxTextExtent];
Image
*image;
IndexPacket
*indexes;
long
type,
x_offset,
y,
y_offset;
MagickBooleanType
status;
MagickPixelPacket
pixel;
QuantumAny
range;
register ssize_t
i,
x;
register PixelPacket
*q;
ssize_t
count;
unsigned long
depth,
height,
max_value,
width;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) ResetMagickMemory(text,0,sizeof(text));
(void) ReadBlobString(image,text);
if (LocaleNCompare((char *) text,MagickID,strlen(MagickID)) != 0)
return(ReadTEXTImage(image_info,image,text,exception));
do
{
width=0;
height=0;
max_value=0;
*colorspace='\0';
count=(ssize_t) sscanf(text+32,"%lu,%lu,%lu,%s",&width,&height,&max_value,
colorspace);
if ((count != 4) || (width == 0) || (height == 0) || (max_value == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
image->columns=width;
image->rows=height;
for (depth=1; (GetQuantumRange(depth)+1) < max_value; depth++) ;
image->depth=depth;
LocaleLower(colorspace);
i=(ssize_t) strlen(colorspace)-1;
image->matte=MagickFalse;
if ((i > 0) && (colorspace[i] == 'a'))
{
colorspace[i]='\0';
image->matte=MagickTrue;
}
type=ParseCommandOption(MagickColorspaceOptions,MagickFalse,colorspace);
if (type < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
image->colorspace=(ColorspaceType) type;
(void) ResetMagickMemory(&pixel,0,sizeof(pixel));
(void) SetImageBackgroundColor(image);
range=GetQuantumRange(image->depth);
for (y=0; y < (ssize_t) image->rows; y++)
{
double
blue,
green,
index,
opacity,
red;
red=0.0;
green=0.0;
blue=0.0;
index=0.0;
opacity=0.0;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (ReadBlobString(image,text) == (char *) NULL)
break;
switch (image->colorspace)
{
case GRAYColorspace:
{
if (image->matte != MagickFalse)
{
count=(ssize_t) sscanf(text,"%ld,%ld: (%lf%*[%,]%lf%*[%,]",
&x_offset,&y_offset,&red,&opacity);
green=red;
blue=red;
break;
}
count=(ssize_t) sscanf(text,"%ld,%ld: (%lf%*[%,]",&x_offset,
&y_offset,&red);
green=red;
blue=red;
break;
}
case CMYKColorspace:
{
if (image->matte != MagickFalse)
{
count=(ssize_t) sscanf(text,
"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]",
&x_offset,&y_offset,&red,&green,&blue,&index,&opacity);
break;
}
count=(ssize_t) sscanf(text,
"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]",&x_offset,
&y_offset,&red,&green,&blue,&index);
break;
}
default:
{
if (image->matte != MagickFalse)
{
count=(ssize_t) sscanf(text,
"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]",
&x_offset,&y_offset,&red,&green,&blue,&opacity);
break;
}
count=(ssize_t) sscanf(text,
"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]",&x_offset,&y_offset,
&red,&green,&blue);
break;
}
}
if (strchr(text,'%') != (char *) NULL)
{
red*=0.01*range;
green*=0.01*range;
blue*=0.01*range;
index*=0.01*range;
opacity*=0.01*range;
}
if (image->colorspace == LabColorspace)
{
green+=(range+1)/2.0;
blue+=(range+1)/2.0;
}
pixel.red=ScaleAnyToQuantum((QuantumAny) (red+0.5),range);
pixel.green=ScaleAnyToQuantum((QuantumAny) (green+0.5),range);
pixel.blue=ScaleAnyToQuantum((QuantumAny) (blue+0.5),range);
pixel.index=ScaleAnyToQuantum((QuantumAny) (index+0.5),range);
pixel.opacity=ScaleAnyToQuantum((QuantumAny) (opacity+0.5),range);
q=GetAuthenticPixels(image,x_offset,y_offset,1,1,exception);
if (q == (PixelPacket *) NULL)
continue;
SetPixelRed(q,pixel.red);
SetPixelGreen(q,pixel.green);
SetPixelBlue(q,pixel.blue);
if (image->colorspace == CMYKColorspace)
{
indexes=GetAuthenticIndexQueue(image);
SetPixelIndex(indexes,pixel.index);
}
if (image->matte != MagickFalse)
SetPixelAlpha(q,pixel.opacity);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
(void) ReadBlobString(image,text);
if (LocaleNCompare((char *) text,MagickID,strlen(MagickID)) == 0)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (LocaleNCompare((char *) text,MagickID,strlen(MagickID)) == 0);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
Commit Message:
CWE ID: CWE-119 | static Image *ReadTXTImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
colorspace[MaxTextExtent],
text[MaxTextExtent];
Image
*image;
IndexPacket
*indexes;
long
type,
x_offset,
y,
y_offset;
MagickBooleanType
status;
MagickPixelPacket
pixel;
QuantumAny
range;
register ssize_t
i,
x;
register PixelPacket
*q;
ssize_t
count;
unsigned long
depth,
height,
max_value,
width;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) ResetMagickMemory(text,0,sizeof(text));
(void) ReadBlobString(image,text);
if (LocaleNCompare((char *) text,MagickID,strlen(MagickID)) != 0)
return(ReadTEXTImage(image_info,image,text,exception));
do
{
width=0;
height=0;
max_value=0;
*colorspace='\0';
count=(ssize_t) sscanf(text+32,"%lu,%lu,%lu,%s",&width,&height,&max_value,
colorspace);
if ((count != 4) || (width == 0) || (height == 0) || (max_value == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
image->columns=width;
image->rows=height;
for (depth=1; (GetQuantumRange(depth)+1) < max_value; depth++) ;
image->depth=depth;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
LocaleLower(colorspace);
i=(ssize_t) strlen(colorspace)-1;
image->matte=MagickFalse;
if ((i > 0) && (colorspace[i] == 'a'))
{
colorspace[i]='\0';
image->matte=MagickTrue;
}
type=ParseCommandOption(MagickColorspaceOptions,MagickFalse,colorspace);
if (type < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
image->colorspace=(ColorspaceType) type;
(void) ResetMagickMemory(&pixel,0,sizeof(pixel));
(void) SetImageBackgroundColor(image);
range=GetQuantumRange(image->depth);
for (y=0; y < (ssize_t) image->rows; y++)
{
double
blue,
green,
index,
opacity,
red;
red=0.0;
green=0.0;
blue=0.0;
index=0.0;
opacity=0.0;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (ReadBlobString(image,text) == (char *) NULL)
break;
switch (image->colorspace)
{
case GRAYColorspace:
{
if (image->matte != MagickFalse)
{
count=(ssize_t) sscanf(text,"%ld,%ld: (%lf%*[%,]%lf%*[%,]",
&x_offset,&y_offset,&red,&opacity);
green=red;
blue=red;
break;
}
count=(ssize_t) sscanf(text,"%ld,%ld: (%lf%*[%,]",&x_offset,
&y_offset,&red);
green=red;
blue=red;
break;
}
case CMYKColorspace:
{
if (image->matte != MagickFalse)
{
count=(ssize_t) sscanf(text,
"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]",
&x_offset,&y_offset,&red,&green,&blue,&index,&opacity);
break;
}
count=(ssize_t) sscanf(text,
"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]",&x_offset,
&y_offset,&red,&green,&blue,&index);
break;
}
default:
{
if (image->matte != MagickFalse)
{
count=(ssize_t) sscanf(text,
"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]%lf%*[%,]",
&x_offset,&y_offset,&red,&green,&blue,&opacity);
break;
}
count=(ssize_t) sscanf(text,
"%ld,%ld: (%lf%*[%,]%lf%*[%,]%lf%*[%,]",&x_offset,&y_offset,
&red,&green,&blue);
break;
}
}
if (strchr(text,'%') != (char *) NULL)
{
red*=0.01*range;
green*=0.01*range;
blue*=0.01*range;
index*=0.01*range;
opacity*=0.01*range;
}
if (image->colorspace == LabColorspace)
{
green+=(range+1)/2.0;
blue+=(range+1)/2.0;
}
pixel.red=ScaleAnyToQuantum((QuantumAny) (red+0.5),range);
pixel.green=ScaleAnyToQuantum((QuantumAny) (green+0.5),range);
pixel.blue=ScaleAnyToQuantum((QuantumAny) (blue+0.5),range);
pixel.index=ScaleAnyToQuantum((QuantumAny) (index+0.5),range);
pixel.opacity=ScaleAnyToQuantum((QuantumAny) (opacity+0.5),range);
q=GetAuthenticPixels(image,x_offset,y_offset,1,1,exception);
if (q == (PixelPacket *) NULL)
continue;
SetPixelRed(q,pixel.red);
SetPixelGreen(q,pixel.green);
SetPixelBlue(q,pixel.blue);
if (image->colorspace == CMYKColorspace)
{
indexes=GetAuthenticIndexQueue(image);
SetPixelIndex(indexes,pixel.index);
}
if (image->matte != MagickFalse)
SetPixelAlpha(q,pixel.opacity);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
(void) ReadBlobString(image,text);
if (LocaleNCompare((char *) text,MagickID,strlen(MagickID)) == 0)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (LocaleNCompare((char *) text,MagickID,strlen(MagickID)) == 0);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| 168,614 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len)
{
struct sock *sk = asoc->base.sk;
int err = 0;
long current_timeo = *timeo_p;
DEFINE_WAIT(wait);
pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
*timeo_p, msg_len);
/* Increment the association's refcnt. */
sctp_association_hold(asoc);
/* Wait on the association specific sndbuf space. */
for (;;) {
prepare_to_wait_exclusive(&asoc->wait, &wait,
TASK_INTERRUPTIBLE);
if (!*timeo_p)
goto do_nonblock;
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
asoc->base.dead)
goto do_error;
if (signal_pending(current))
goto do_interrupted;
if (msg_len <= sctp_wspace(asoc))
break;
/* Let another process have a go. Since we are going
* to sleep anyway.
*/
release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
if (sk != asoc->base.sk)
goto do_error;
lock_sock(sk);
*timeo_p = current_timeo;
}
out:
finish_wait(&asoc->wait, &wait);
/* Release the association's refcnt. */
sctp_association_put(asoc);
return err;
do_error:
err = -EPIPE;
goto out;
do_interrupted:
err = sock_intr_errno(*timeo_p);
goto out;
do_nonblock:
err = -EAGAIN;
goto out;
}
Commit Message: sctp: deny peeloff operation on asocs with threads sleeping on it
commit 2dcab5984841 ("sctp: avoid BUG_ON on sctp_wait_for_sndbuf")
attempted to avoid a BUG_ON call when the association being used for a
sendmsg() is blocked waiting for more sndbuf and another thread did a
peeloff operation on such asoc, moving it to another socket.
As Ben Hutchings noticed, then in such case it would return without
locking back the socket and would cause two unlocks in a row.
Further analysis also revealed that it could allow a double free if the
application managed to peeloff the asoc that is created during the
sendmsg call, because then sctp_sendmsg() would try to free the asoc
that was created only for that call.
This patch takes another approach. It will deny the peeloff operation
if there is a thread sleeping on the asoc, so this situation doesn't
exist anymore. This avoids the issues described above and also honors
the syscalls that are already being handled (it can be multiple sendmsg
calls).
Joint work with Xin Long.
Fixes: 2dcab5984841 ("sctp: avoid BUG_ON on sctp_wait_for_sndbuf")
Cc: Alexander Popov <[email protected]>
Cc: Ben Hutchings <[email protected]>
Signed-off-by: Marcelo Ricardo Leitner <[email protected]>
Signed-off-by: Xin Long <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-415 | static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len)
{
struct sock *sk = asoc->base.sk;
int err = 0;
long current_timeo = *timeo_p;
DEFINE_WAIT(wait);
pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
*timeo_p, msg_len);
/* Increment the association's refcnt. */
sctp_association_hold(asoc);
/* Wait on the association specific sndbuf space. */
for (;;) {
prepare_to_wait_exclusive(&asoc->wait, &wait,
TASK_INTERRUPTIBLE);
if (!*timeo_p)
goto do_nonblock;
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
asoc->base.dead)
goto do_error;
if (signal_pending(current))
goto do_interrupted;
if (msg_len <= sctp_wspace(asoc))
break;
/* Let another process have a go. Since we are going
* to sleep anyway.
*/
release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
lock_sock(sk);
*timeo_p = current_timeo;
}
out:
finish_wait(&asoc->wait, &wait);
/* Release the association's refcnt. */
sctp_association_put(asoc);
return err;
do_error:
err = -EPIPE;
goto out;
do_interrupted:
err = sock_intr_errno(*timeo_p);
goto out;
do_nonblock:
err = -EAGAIN;
goto out;
}
| 168,343 |
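The function above only loses the sk != asoc->base.sk bail-out; the part of the commit that actually denies the peeloff lives in sctp_do_peeloff() and is not shown in this row. A minimal sketch of that side of the change follows. The commit message only states that peeloff is refused while a thread sleeps on the association, so the helper name, the waitqueue_active() test, and the -EBUSY return here should be read as assumptions about the concrete implementation.
/* Sketch of the peeloff-side check.  Once peeloff is refused while someone
 * is sleeping in sctp_wait_for_sndbuf(), the association can no longer be
 * moved to another socket underneath the sleeper, which is why the fixed
 * wait loop above no longer needs to re-check asoc->base.sk. */
static int sctp_do_peeloff_sketch(struct sock *sk, sctp_assoc_t id,
				  struct socket **sockp)
{
	struct sctp_association *asoc = sctp_id2assoc(sk, id);

	if (!asoc)
		return -EINVAL;

	/* If there is a thread waiting on more sndbuf space for
	 * sending on this asoc, it cannot be peeled.
	 */
	if (waitqueue_active(&asoc->wait))
		return -EBUSY;

	/* ... create the new socket and migrate the association to it ... */
	return 0;
}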
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: EncodedJSValue JSC_HOST_CALL jsTestMediaQueryListListenerPrototypeFunctionMethod(ExecState* exec)
{
JSValue thisValue = exec->hostThisValue();
if (!thisValue.inherits(&JSTestMediaQueryListListener::s_info))
return throwVMTypeError(exec);
JSTestMediaQueryListListener* castedThis = jsCast<JSTestMediaQueryListListener*>(asObject(thisValue));
ASSERT_GC_OBJECT_INHERITS(castedThis, &JSTestMediaQueryListListener::s_info);
TestMediaQueryListListener* impl = static_cast<TestMediaQueryListListener*>(castedThis->impl());
if (exec->argumentCount() < 1)
return throwVMError(exec, createTypeError(exec, "Not enough arguments"));
RefPtr<MediaQueryListListener> listener(MediaQueryListListener::create(ScriptValue(exec->globalData(), MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined))));
if (exec->hadException())
return JSValue::encode(jsUndefined());
impl->method(listener);
return JSValue::encode(jsUndefined());
}
Commit Message: [JSC] Implement a helper method createNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=85102
Reviewed by Geoffrey Garen.
In bug 84787, kbr@ requested to avoid hard-coding
createTypeError(exec, "Not enough arguments") here and there.
This patch implements createNotEnoughArgumentsError(exec)
and uses it in JSC bindings.
c.f. a corresponding bug for V8 bindings is bug 85097.
Source/JavaScriptCore:
* runtime/Error.cpp:
(JSC::createNotEnoughArgumentsError):
(JSC):
* runtime/Error.h:
(JSC):
Source/WebCore:
Test: bindings/scripts/test/TestObj.idl
* bindings/scripts/CodeGeneratorJS.pm: Modified as described above.
(GenerateArgumentsCountCheck):
* bindings/js/JSDataViewCustom.cpp: Ditto.
(WebCore::getDataViewMember):
(WebCore::setDataViewMember):
* bindings/js/JSDeprecatedPeerConnectionCustom.cpp:
(WebCore::JSDeprecatedPeerConnectionConstructor::constructJSDeprecatedPeerConnection):
* bindings/js/JSDirectoryEntryCustom.cpp:
(WebCore::JSDirectoryEntry::getFile):
(WebCore::JSDirectoryEntry::getDirectory):
* bindings/js/JSSharedWorkerCustom.cpp:
(WebCore::JSSharedWorkerConstructor::constructJSSharedWorker):
* bindings/js/JSWebKitMutationObserverCustom.cpp:
(WebCore::JSWebKitMutationObserverConstructor::constructJSWebKitMutationObserver):
(WebCore::JSWebKitMutationObserver::observe):
* bindings/js/JSWorkerCustom.cpp:
(WebCore::JSWorkerConstructor::constructJSWorker):
* bindings/scripts/test/JS/JSFloat64Array.cpp: Updated run-bindings-tests.
(WebCore::jsFloat64ArrayPrototypeFunctionFoo):
* bindings/scripts/test/JS/JSTestActiveDOMObject.cpp:
(WebCore::jsTestActiveDOMObjectPrototypeFunctionExcitingFunction):
(WebCore::jsTestActiveDOMObjectPrototypeFunctionPostMessage):
* bindings/scripts/test/JS/JSTestCustomNamedGetter.cpp:
(WebCore::jsTestCustomNamedGetterPrototypeFunctionAnotherFunction):
* bindings/scripts/test/JS/JSTestEventTarget.cpp:
(WebCore::jsTestEventTargetPrototypeFunctionItem):
(WebCore::jsTestEventTargetPrototypeFunctionAddEventListener):
(WebCore::jsTestEventTargetPrototypeFunctionRemoveEventListener):
(WebCore::jsTestEventTargetPrototypeFunctionDispatchEvent):
* bindings/scripts/test/JS/JSTestInterface.cpp:
(WebCore::JSTestInterfaceConstructor::constructJSTestInterface):
(WebCore::jsTestInterfacePrototypeFunctionSupplementalMethod2):
* bindings/scripts/test/JS/JSTestMediaQueryListListener.cpp:
(WebCore::jsTestMediaQueryListListenerPrototypeFunctionMethod):
* bindings/scripts/test/JS/JSTestNamedConstructor.cpp:
(WebCore::JSTestNamedConstructorNamedConstructor::constructJSTestNamedConstructor):
* bindings/scripts/test/JS/JSTestObj.cpp:
(WebCore::JSTestObjConstructor::constructJSTestObj):
(WebCore::jsTestObjPrototypeFunctionVoidMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionIntMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionObjMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionMethodWithSequenceArg):
(WebCore::jsTestObjPrototypeFunctionMethodReturningSequence):
(WebCore::jsTestObjPrototypeFunctionMethodThatRequiresAllArgsAndThrows):
(WebCore::jsTestObjPrototypeFunctionSerializedValue):
(WebCore::jsTestObjPrototypeFunctionIdbKey):
(WebCore::jsTestObjPrototypeFunctionOptionsObject):
(WebCore::jsTestObjPrototypeFunctionAddEventListener):
(WebCore::jsTestObjPrototypeFunctionRemoveEventListener):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndOptionalArg):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndTwoOptionalArgs):
(WebCore::jsTestObjPrototypeFunctionMethodWithCallbackArg):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonCallbackArgAndCallbackArg):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod1):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod2):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod3):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod4):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod5):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod6):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod7):
(WebCore::jsTestObjConstructorFunctionClassMethod2):
(WebCore::jsTestObjConstructorFunctionOverloadedMethod11):
(WebCore::jsTestObjConstructorFunctionOverloadedMethod12):
(WebCore::jsTestObjPrototypeFunctionMethodWithUnsignedLongArray):
(WebCore::jsTestObjPrototypeFunctionConvert1):
(WebCore::jsTestObjPrototypeFunctionConvert2):
(WebCore::jsTestObjPrototypeFunctionConvert3):
(WebCore::jsTestObjPrototypeFunctionConvert4):
(WebCore::jsTestObjPrototypeFunctionConvert5):
(WebCore::jsTestObjPrototypeFunctionStrictFunction):
* bindings/scripts/test/JS/JSTestSerializedScriptValueInterface.cpp:
(WebCore::JSTestSerializedScriptValueInterfaceConstructor::constructJSTestSerializedScriptValueInterface):
(WebCore::jsTestSerializedScriptValueInterfacePrototypeFunctionAcceptTransferList):
git-svn-id: svn://svn.chromium.org/blink/trunk@115536 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-20 | EncodedJSValue JSC_HOST_CALL jsTestMediaQueryListListenerPrototypeFunctionMethod(ExecState* exec)
{
JSValue thisValue = exec->hostThisValue();
if (!thisValue.inherits(&JSTestMediaQueryListListener::s_info))
return throwVMTypeError(exec);
JSTestMediaQueryListListener* castedThis = jsCast<JSTestMediaQueryListListener*>(asObject(thisValue));
ASSERT_GC_OBJECT_INHERITS(castedThis, &JSTestMediaQueryListListener::s_info);
TestMediaQueryListListener* impl = static_cast<TestMediaQueryListListener*>(castedThis->impl());
if (exec->argumentCount() < 1)
return throwVMError(exec, createNotEnoughArgumentsError(exec));
RefPtr<MediaQueryListListener> listener(MediaQueryListListener::create(ScriptValue(exec->globalData(), MAYBE_MISSING_PARAMETER(exec, 0, DefaultIsUndefined))));
if (exec->hadException())
return JSValue::encode(jsUndefined());
impl->method(listener);
return JSValue::encode(jsUndefined());
}
| 170,576 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: BGD_DECLARE(gdImagePtr) gdImageCropThreshold(gdImagePtr im, const unsigned int color, const float threshold)
{
const int width = gdImageSX(im);
const int height = gdImageSY(im);
int x,y;
int match;
gdRect crop;
crop.x = 0;
crop.y = 0;
crop.width = 0;
crop.height = 0;
/* Pierre: crop everything sounds bad */
if (threshold > 100.0) {
return NULL;
}
/* TODO: Add gdImageGetRowPtr and works with ptr at the row level
* for the true color and palette images
* new formats will simply work with ptr
*/
match = 1;
for (y = 0; match && y < height; y++) {
for (x = 0; match && x < width; x++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x,y), threshold)) > 0;
}
}
/* Pierre
* Nothing to do > bye
* Duplicate the image?
*/
if (y == height - 1) {
return NULL;
}
crop.y = y -1;
match = 1;
for (y = height - 1; match && y >= 0; y--) {
for (x = 0; match && x < width; x++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x, y), threshold)) > 0;
}
}
if (y == 0) {
crop.height = height - crop.y + 1;
} else {
crop.height = y - crop.y + 2;
}
match = 1;
for (x = 0; match && x < width; x++) {
for (y = 0; match && y < crop.y + crop.height - 1; y++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x,y), threshold)) > 0;
}
}
crop.x = x - 1;
match = 1;
for (x = width - 1; match && x >= 0; x--) {
for (y = 0; match && y < crop.y + crop.height - 1; y++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x,y), threshold)) > 0;
}
}
crop.width = x - crop.x + 2;
return gdImageCrop(im, &crop);
}
Commit Message: fix php 72494, invalid color index not handled, can lead to crash
CWE ID: CWE-20 | BGD_DECLARE(gdImagePtr) gdImageCropThreshold(gdImagePtr im, const unsigned int color, const float threshold)
{
const int width = gdImageSX(im);
const int height = gdImageSY(im);
int x,y;
int match;
gdRect crop;
crop.x = 0;
crop.y = 0;
crop.width = 0;
crop.height = 0;
/* Pierre: crop everything sounds bad */
if (threshold > 100.0) {
return NULL;
}
if (color < 0 || (!gdImageTrueColor(im) && color >= gdImageColorsTotal(im))) {
return NULL;
}
/* TODO: Add gdImageGetRowPtr and works with ptr at the row level
* for the true color and palette images
* new formats will simply work with ptr
*/
match = 1;
for (y = 0; match && y < height; y++) {
for (x = 0; match && x < width; x++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x,y), threshold)) > 0;
}
}
/* Pierre
* Nothing to do > bye
* Duplicate the image?
*/
if (y == height - 1) {
return NULL;
}
crop.y = y -1;
match = 1;
for (y = height - 1; match && y >= 0; y--) {
for (x = 0; match && x < width; x++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x, y), threshold)) > 0;
}
}
if (y == 0) {
crop.height = height - crop.y + 1;
} else {
crop.height = y - crop.y + 2;
}
match = 1;
for (x = 0; match && x < width; x++) {
for (y = 0; match && y < crop.y + crop.height - 1; y++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x,y), threshold)) > 0;
}
}
crop.x = x - 1;
match = 1;
for (x = width - 1; match && x >= 0; x--) {
for (y = 0; match && y < crop.y + crop.height - 1; y++) {
match = (gdColorMatch(im, color, gdImageGetPixel(im, x,y), threshold)) > 0;
}
}
crop.width = x - crop.x + 2;
return gdImageCrop(im, &crop);
}
| 169,944 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: safecat_color_encoding(char *buffer, size_t bufsize, size_t pos,
PNG_CONST color_encoding *e, double encoding_gamma)
{
if (e != 0)
{
if (encoding_gamma != 0)
pos = safecat(buffer, bufsize, pos, "(");
pos = safecat(buffer, bufsize, pos, "R(");
pos = safecatd(buffer, bufsize, pos, e->red.X, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->red.Y, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->red.Z, 4);
pos = safecat(buffer, bufsize, pos, "),G(");
pos = safecatd(buffer, bufsize, pos, e->green.X, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->green.Y, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->green.Z, 4);
pos = safecat(buffer, bufsize, pos, "),B(");
pos = safecatd(buffer, bufsize, pos, e->blue.X, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->blue.Y, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->blue.Z, 4);
pos = safecat(buffer, bufsize, pos, ")");
if (encoding_gamma != 0)
pos = safecat(buffer, bufsize, pos, ")");
}
if (encoding_gamma != 0)
{
pos = safecat(buffer, bufsize, pos, "^");
pos = safecatd(buffer, bufsize, pos, encoding_gamma, 5);
}
return pos;
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | safecat_color_encoding(char *buffer, size_t bufsize, size_t pos,
const color_encoding *e, double encoding_gamma)
{
if (e != 0)
{
if (encoding_gamma != 0)
pos = safecat(buffer, bufsize, pos, "(");
pos = safecat(buffer, bufsize, pos, "R(");
pos = safecatd(buffer, bufsize, pos, e->red.X, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->red.Y, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->red.Z, 4);
pos = safecat(buffer, bufsize, pos, "),G(");
pos = safecatd(buffer, bufsize, pos, e->green.X, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->green.Y, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->green.Z, 4);
pos = safecat(buffer, bufsize, pos, "),B(");
pos = safecatd(buffer, bufsize, pos, e->blue.X, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->blue.Y, 4);
pos = safecat(buffer, bufsize, pos, ",");
pos = safecatd(buffer, bufsize, pos, e->blue.Z, 4);
pos = safecat(buffer, bufsize, pos, ")");
if (encoding_gamma != 0)
pos = safecat(buffer, bufsize, pos, ")");
}
if (encoding_gamma != 0)
{
pos = safecat(buffer, bufsize, pos, "^");
pos = safecatd(buffer, bufsize, pos, encoding_gamma, 5);
}
return pos;
}
| 173,690 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: spnego_gss_process_context_token(
OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_buffer_t token_buffer)
{
OM_uint32 ret;
ret = gss_process_context_token(minor_status,
context_handle,
token_buffer);
return (ret);
}
Commit Message: Fix SPNEGO context aliasing bugs [CVE-2015-2695]
The SPNEGO mechanism currently replaces its context handle with the
mechanism context handle upon establishment, under the assumption that
most GSS functions are only called after context establishment. This
assumption is incorrect, and can lead to aliasing violations for some
programs. Maintain the SPNEGO context structure after context
establishment and refer to it in all GSS methods. Add initiate and
opened flags to the SPNEGO context structure for use in
gss_inquire_context() prior to context establishment.
CVE-2015-2695:
In MIT krb5 1.5 and later, applications which call
gss_inquire_context() on a partially-established SPNEGO context can
cause the GSS-API library to read from a pointer using the wrong type,
generally causing a process crash. This bug may go unnoticed, because
the most common SPNEGO authentication scenario establishes the context
after just one call to gss_accept_sec_context(). Java server
applications using the native JGSS provider are vulnerable to this
bug. A carefully crafted SPNEGO packet might allow the
gss_inquire_context() call to succeed with attacker-determined
results, but applications should not make access control decisions
based on gss_inquire_context() results prior to context establishment.
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[[email protected]: several bugfixes, style changes, and edge-case
behavior changes; commit message and CVE description]
ticket: 8244
target_version: 1.14
tags: pullup
CWE ID: CWE-18 | spnego_gss_process_context_token(
OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_buffer_t token_buffer)
{
OM_uint32 ret;
spnego_gss_ctx_id_t sc = (spnego_gss_ctx_id_t)context_handle;
/* SPNEGO doesn't have its own context tokens. */
if (!sc->opened)
return (GSS_S_DEFECTIVE_TOKEN);
ret = gss_process_context_token(minor_status,
sc->ctx_handle,
token_buffer);
return (ret);
}
| 166,663 |
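The sc->opened test above depends on state that the commit adds to the SPNEGO context structure itself, which is not reproduced in this row. A rough fragment of that structure change is sketched below; apart from opened (dereferenced by the fixed function) and initiate (named in the commit message), the member names and types are assumptions rather than krb5's actual declaration.
/* Sketch: the SPNEGO context record keeps the mechanism context it wraps
 * plus two flags describing its own lifecycle, so every SPNEGO entry point
 * can safely cast the caller's gss_ctx_id_t back to this type. */
typedef struct {
	/* ... existing negotiation state (mech list, tokens, ...) ... */
	gss_ctx_id_t	ctx_handle;	/* underlying mechanism context   */
	int		initiate;	/* nonzero on the initiator side  */
	int		opened;		/* nonzero once fully established */
} spnego_gss_ctx_id_rec, *spnego_gss_ctx_id_t;
Before this change the SPNEGO handle was silently replaced by the mechanism handle at establishment, so a cast like the one in the fixed spnego_gss_process_context_token() would have read a pointer of the wrong type, which is the aliasing problem described in the CVE text.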
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: void DevToolsUIBindings::CallClientFunction(const std::string& function_name,
const base::Value* arg1,
const base::Value* arg2,
const base::Value* arg3) {
if (!web_contents_->GetURL().SchemeIs(content::kChromeDevToolsScheme))
return;
std::string javascript = function_name + "(";
if (arg1) {
std::string json;
base::JSONWriter::Write(*arg1, &json);
javascript.append(json);
if (arg2) {
base::JSONWriter::Write(*arg2, &json);
javascript.append(", ").append(json);
if (arg3) {
base::JSONWriter::Write(*arg3, &json);
javascript.append(", ").append(json);
}
}
}
javascript.append(");");
web_contents_->GetMainFrame()->ExecuteJavaScript(
base::UTF8ToUTF16(javascript));
}
Commit Message: DevTools: move front-end URL handling to DevToolsUIBindingds
BUG=662859
Review-Url: https://codereview.chromium.org/2607833002
Cr-Commit-Position: refs/heads/master@{#440926}
CWE ID: CWE-200 | void DevToolsUIBindings::CallClientFunction(const std::string& function_name,
const base::Value* arg1,
const base::Value* arg2,
const base::Value* arg3) {
if (!web_contents_->GetURL().SchemeIs(content::kChromeDevToolsScheme))
return;
// If we're not exposing bindings, we shouldn't call functions either.
if (!frontend_host_)
return;
std::string javascript = function_name + "(";
if (arg1) {
std::string json;
base::JSONWriter::Write(*arg1, &json);
javascript.append(json);
if (arg2) {
base::JSONWriter::Write(*arg2, &json);
javascript.append(", ").append(json);
if (arg3) {
base::JSONWriter::Write(*arg3, &json);
javascript.append(", ").append(json);
}
}
}
javascript.append(");");
web_contents_->GetMainFrame()->ExecuteJavaScript(
base::UTF8ToUTF16(javascript));
}
| 172,451 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: status_t OMXNodeInstance::setParameter(
OMX_INDEXTYPE index, const void *params, size_t size) {
Mutex::Autolock autoLock(mLock);
OMX_INDEXEXTTYPE extIndex = (OMX_INDEXEXTTYPE)index;
CLOG_CONFIG(setParameter, "%s(%#x), %zu@%p)", asString(extIndex), index, size, params);
OMX_ERRORTYPE err = OMX_SetParameter(
mHandle, index, const_cast<void *>(params));
CLOG_IF_ERROR(setParameter, err, "%s(%#x)", asString(extIndex), index);
return StatusFromOMXError(err);
}
Commit Message: DO NOT MERGE: IOMX: work against metadata buffer spoofing
- Prohibit direct set/getParam/Settings for extensions meant for
OMXNodeInstance alone. This disallows enabling metadata mode
without the knowledge of OMXNodeInstance.
- Use a backup buffer for metadata mode buffers and do not directly
share with clients.
- Disallow setting up metadata mode/tunneling/input surface
after first sendCommand.
- Disallow store-meta for input cross process.
- Disallow emptyBuffer for surface input (via IOMX).
- Fix checking for input surface.
Bug: 29422020
Change-Id: I801c77b80e703903f62e42d76fd2e76a34e4bc8e
(cherry picked from commit 7c3c2fa3e233c656fc8c2fc2a6634b3ecf8a23e8)
CWE ID: CWE-200 | status_t OMXNodeInstance::setParameter(
OMX_INDEXTYPE index, const void *params, size_t size) {
Mutex::Autolock autoLock(mLock);
OMX_INDEXEXTTYPE extIndex = (OMX_INDEXEXTTYPE)index;
CLOG_CONFIG(setParameter, "%s(%#x), %zu@%p)", asString(extIndex), index, size, params);
if (isProhibitedIndex_l(index)) {
android_errorWriteLog(0x534e4554, "29422020");
return BAD_INDEX;
}
OMX_ERRORTYPE err = OMX_SetParameter(
mHandle, index, const_cast<void *>(params));
CLOG_IF_ERROR(setParameter, err, "%s(%#x)", asString(extIndex), index);
return StatusFromOMXError(err);
}
| 174,139 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static MagickBooleanType SyncExifProfile(Image *image, StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005
typedef struct _DirectoryInfo
{
unsigned char
*directory;
size_t
entry;
} DirectoryInfo;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
size_t
entry,
length,
number_entries;
SplayTreeInfo
*exif_resources;
ssize_t
id,
level,
offset;
static int
format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
unsigned char
*directory,
*exif;
/*
Set EXIF resolution tag.
*/
length=GetStringInfoLength(profile);
exif=GetStringInfoDatum(profile);
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
if ((id != 0x4949) && (id != 0x4D4D))
{
while (length != 0)
{
if (ReadProfileByte(&exif,&length) != 0x45)
continue;
if (ReadProfileByte(&exif,&length) != 0x78)
continue;
if (ReadProfileByte(&exif,&length) != 0x69)
continue;
if (ReadProfileByte(&exif,&length) != 0x66)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
}
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
if (ReadProfileShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This the offset to the first IFD.
*/
offset=(ssize_t) ReadProfileLong(endian,exif+4);
if ((offset < 0) || ((size_t) offset >= length))
return(MagickFalse);
directory=exif+offset;
level=0;
entry=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=ReadProfileShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
register unsigned char
*p,
*q;
size_t
number_bytes;
ssize_t
components,
format,
tag_value;
q=(unsigned char *) (directory+2+(12*entry));
if (q > (exif+length-12))
break; /* corrupt EXIF */
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(ssize_t) ReadProfileShort(endian,q);
format=(ssize_t) ReadProfileShort(endian,q+2);
if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
break;
components=(ssize_t) ReadProfileLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*format_bytes[format];
if ((ssize_t) number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
/*
The directory entry contains an offset.
*/
offset=(ssize_t) ReadProfileLong(endian,q+8);
if ((ssize_t) (offset+number_bytes) < offset)
continue; /* prevent overflow */
if ((size_t) (offset+number_bytes) > length)
continue;
p=(unsigned char *) (exif+offset);
}
switch (tag_value)
{
case 0x011a:
{
(void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x011b:
{
(void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x0112:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) image->orientation,p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) image->orientation,
p);
break;
}
case 0x0128:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) (image->units+1),p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
break;
}
default:
break;
}
if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
{
offset=(ssize_t) ReadProfileLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
level++;
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)) > (exif+length))
break;
offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(MagickTrue);
}
Commit Message: https://github.com/ImageMagick/ImageMagick/issues/354
CWE ID: CWE-415 | static MagickBooleanType SyncExifProfile(Image *image, StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005
typedef struct _DirectoryInfo
{
unsigned char
*directory;
size_t
entry;
} DirectoryInfo;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
size_t
entry,
length,
number_entries;
SplayTreeInfo
*exif_resources;
ssize_t
id,
level,
offset;
static int
format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
unsigned char
*directory,
*exif;
/*
Set EXIF resolution tag.
*/
length=GetStringInfoLength(profile);
exif=GetStringInfoDatum(profile);
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
if ((id != 0x4949) && (id != 0x4D4D))
{
while (length != 0)
{
if (ReadProfileByte(&exif,&length) != 0x45)
continue;
if (ReadProfileByte(&exif,&length) != 0x78)
continue;
if (ReadProfileByte(&exif,&length) != 0x69)
continue;
if (ReadProfileByte(&exif,&length) != 0x66)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
if (ReadProfileByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadProfileShort(LSBEndian,exif);
}
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
if (ReadProfileShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This the offset to the first IFD.
*/
offset=(ssize_t) ReadProfileLong(endian,exif+4);
if ((offset < 0) || ((size_t) offset >= length))
return(MagickFalse);
directory=exif+offset;
level=0;
entry=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=ReadProfileShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
register unsigned char
*p,
*q;
size_t
number_bytes;
ssize_t
components,
format,
tag_value;
q=(unsigned char *) (directory+2+(12*entry));
if (q > (exif+length-12))
break; /* corrupt EXIF */
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(ssize_t) ReadProfileShort(endian,q);
format=(ssize_t) ReadProfileShort(endian,q+2);
if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
break;
components=(ssize_t) ReadProfileLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*format_bytes[format];
if ((ssize_t) number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
/*
The directory entry contains an offset.
*/
offset=(ssize_t) ReadProfileLong(endian,q+8);
if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
continue;
if (~length < number_bytes)
continue; /* prevent overflow */
p=(unsigned char *) (exif+offset);
}
switch (tag_value)
{
case 0x011a:
{
(void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x011b:
{
(void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
(void) WriteProfileLong(endian,1UL,p+4);
break;
}
case 0x0112:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) image->orientation,p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) image->orientation,
p);
break;
}
case 0x0128:
{
if (number_bytes == 4)
{
(void) WriteProfileLong(endian,(size_t) (image->units+1),p);
break;
}
(void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
break;
}
default:
break;
}
if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
{
offset=(ssize_t) ReadProfileLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
level++;
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)) > (exif+length))
break;
offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(MagickTrue);
}
| 168,409 |
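The core of this fix is the overflow-safe validation of the attacker-controlled offset/number_bytes pair before it is turned into a pointer into the profile. The standalone program below restates that idiom outside ImageMagick's types; the helper name and the test values are illustrative only, and the three guards are the same ones the fixed SyncExifProfile() applies, just grouped into one function.
#include <stdio.h>
#include <sys/types.h>

/* Returns 1 when [offset, offset + number_bytes) fits inside a profile of
 * the given length: reject negative offsets, reject counts so large that
 * the additions involved could wrap, then do the ordinary range check. */
static int exif_range_ok(ssize_t offset, size_t number_bytes, size_t length)
{
    if (offset < 0)
        return 0;
    if (~length < number_bytes)           /* sum could wrap around zero   */
        return 0;
    if ((size_t) offset + number_bytes > length)
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", exif_range_ok(16, 8, 64));          /* 1: inside profile  */
    printf("%d\n", exif_range_ok(-4, 8, 64));          /* 0: negative offset */
    printf("%d\n", exif_range_ok(8, (size_t) -1, 64)); /* 0: would wrap      */
    return 0;
}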
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. Remember, I want response with just the fixed code only, No explanation. | Code: static void test_show_object(struct object *object,
struct strbuf *path,
const char *last, void *data)
{
struct bitmap_test_data *tdata = data;
int bitmap_pos;
bitmap_pos = bitmap_position(object->oid.hash);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&object->oid));
bitmap_set(tdata->base, bitmap_pos);
display_progress(tdata->prg, ++tdata->seen);
}
Commit Message: list-objects: pass full pathname to callbacks
When we find a blob at "a/b/c", we currently pass this to
our show_object_fn callbacks as two components: "a/b/" and
"c". Callbacks which want the full value then call
path_name(), which concatenates the two. But this is an
inefficient interface; the path is a strbuf, and we could
simply append "c" to it temporarily, then roll back the
length, without creating a new copy.
So we could improve this by teaching the callsites of
path_name() this trick (and there are only 3). But we can
also notice that no callback actually cares about the
broken-down representation, and simply pass each callback
the full path "a/b/c" as a string. The callback code becomes
even simpler, then, as we do not have to worry about freeing
an allocated buffer, nor rolling back our modification to
the strbuf.
This is theoretically less efficient, as some callbacks
would not bother to format the final path component. But in
practice this is not measurable. Since we use the same
strbuf over and over, our work to grow it is amortized, and
we really only pay to memcpy a few bytes.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
CWE ID: CWE-119 | static void test_show_object(struct object *object,
static void test_show_object(struct object *object, const char *name,
void *data)
{
struct bitmap_test_data *tdata = data;
int bitmap_pos;
bitmap_pos = bitmap_position(object->oid.hash);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&object->oid));
bitmap_set(tdata->base, bitmap_pos);
display_progress(tdata->prg, ++tdata->seen);
}
| 167,423 |
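The commit message describes an interface change that is wider than the one callback shown here: every show_object_fn now receives the already-joined pathname as a single string. A sketch of the before/after callback type is below, using the parameter names from the two versions of test_show_object() above; the _old/_new typedef names are illustrative, not git's.
/* Before: callbacks received the leading directory path as a strbuf plus
 * the final component, and had to join them (via path_name()) themselves. */
typedef void (*show_object_fn_old)(struct object *obj,
                                   struct strbuf *path,
                                   const char *last,
                                   void *data);

/* After: callbacks receive the full "a/b/c" name directly, so the fixed
 * test_show_object() above needs no path handling at all. */
typedef void (*show_object_fn_new)(struct object *obj,
                                   const char *name,
                                   void *data);
On the caller side, the amortization argument made in the commit message is that the traversal code appends the final component to one reused strbuf, passes its buffer to the callback, and rolls the length back afterwards instead of allocating a fresh copy per object.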